Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 3.19-rc5 into staging-next

We want the staging fixes in this branch as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+6263 -4343
+1
.mailmap
··· 51 51 Greg Kroah-Hartman <greg@kroah.com> 52 52 Henk Vergonet <Henk.Vergonet@gmail.com> 53 53 Henrik Kretzschmar <henne@nachtwindheim.de> 54 + Henrik Rydberg <rydberg@bitmath.org> 54 55 Herbert Xu <herbert@gondor.apana.org.au> 55 56 Jacob Shin <Jacob.Shin@amd.com> 56 57 James Bottomley <jejb@mulgrave.(none)>
+15
Documentation/ABI/testing/sysfs-class-mei
··· 14 14 The /sys/class/mei/meiN directory is created for 15 15 each probed mei device 16 16 17 + What: /sys/class/mei/meiN/fw_status 18 + Date: Nov 2014 19 + KernelVersion: 3.19 20 + Contact: Tomas Winkler <tomas.winkler@intel.com> 21 + Description: Display fw status registers content 22 + 23 + The ME FW writes its status information into fw status 24 + registers for BIOS and OS to monitor fw health. 25 + 26 + The register contains running state, power management 27 + state, error codes, and others. The way the registers 28 + are decoded depends on PCH or SoC generation. 29 + Also number of registers varies between 1 and 6 30 + depending on generation. 31 +
+7 -3
Documentation/devicetree/bindings/input/gpio-keys.txt
··· 10 10 Each button (key) is represented as a sub-node of "gpio-keys": 11 11 Subnode properties: 12 12 13 + - gpios: OF device-tree gpio specification. 14 + - interrupts: the interrupt line for that input. 13 15 - label: Descriptive name of the key. 14 16 - linux,code: Keycode to emit. 15 17 16 - Required mutual exclusive subnode-properties: 17 - - gpios: OF device-tree gpio specification. 18 - - interrupts: the interrupt line for that input 18 + Note that either "interrupts" or "gpios" properties can be omitted, but not 19 + both at the same time. Specifying both properties is allowed. 19 20 20 21 Optional subnode-properties: 21 22 - linux,input-type: Specify event type this button/key generates. ··· 24 23 - debounce-interval: Debouncing interval time in milliseconds. 25 24 If not specified defaults to 5. 26 25 - gpio-key,wakeup: Boolean, button can wake-up the system. 26 + - linux,can-disable: Boolean, indicates that button is connected 27 + to dedicated (not shared) interrupt which can be disabled to 28 + suppress events from the button. 27 29 28 30 Example nodes: 29 31
+2
Documentation/devicetree/bindings/input/stmpe-keypad.txt
··· 8 8 - debounce-interval : Debouncing interval time in milliseconds 9 9 - st,scan-count : Scanning cycles elapsed before key data is updated 10 10 - st,no-autorepeat : If specified device will not autorepeat 11 + - keypad,num-rows : See ./matrix-keymap.txt 12 + - keypad,num-columns : See ./matrix-keymap.txt 11 13 12 14 Example: 13 15
+2
Documentation/networking/ip-sysctl.txt
··· 66 66 route/max_size - INTEGER 67 67 Maximum number of routes allowed in the kernel. Increase 68 68 this when using large numbers of interfaces and/or routes. 69 + From linux kernel 3.6 onwards, this is deprecated for ipv4 70 + as route cache is no longer used. 69 71 70 72 neigh/default/gc_thresh1 - INTEGER 71 73 Minimum number of entries to keep. Garbage collector will not
+12 -37
Documentation/target/tcm_mod_builder.py
··· 389 389 buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n" 390 390 buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" 391 391 buf += " .close_session = " + fabric_mod_name + "_close_session,\n" 392 - buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n" 393 - buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n" 394 - buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n" 395 392 buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" 396 393 buf += " .sess_get_initiator_sid = NULL,\n" 397 394 buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" ··· 399 402 buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" 400 403 buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" 401 404 buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n" 402 - buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n" 405 + buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n" 403 406 buf += " /*\n" 404 407 buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n" 405 408 buf += " */\n" ··· 425 428 buf += " /*\n" 426 429 buf += " * Register the top level struct config_item_type with TCM core\n" 427 430 buf += " */\n" 428 - buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n" 431 + buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n" 429 432 buf += " if (IS_ERR(fabric)) {\n" 430 433 buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" 431 434 buf += " return PTR_ERR(fabric);\n" ··· 592 595 if re.search('get_fabric_name', fo): 593 596 buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n" 594 597 buf += "{\n" 595 - buf += " return \"" + fabric_mod_name[4:] + "\";\n" 598 + buf += " return \"" + fabric_mod_name + "\";\n" 596 599 buf += "}\n\n" 597 600 bufi += "char *" + 
fabric_mod_name + "_get_fabric_name(void);\n" 598 601 continue ··· 817 820 buf += "}\n\n" 818 821 bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n" 819 822 820 - if re.search('stop_session\)\(', fo): 821 - buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n" 822 - buf += "{\n" 823 - buf += " return;\n" 824 - buf += "}\n\n" 825 - bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n" 826 - 827 - if re.search('fall_back_to_erl0\)\(', fo): 828 - buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n" 829 - buf += "{\n" 830 - buf += " return;\n" 831 - buf += "}\n\n" 832 - bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n" 833 - 834 - if re.search('sess_logged_in\)\(', fo): 835 - buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n" 836 - buf += "{\n" 837 - buf += " return 0;\n" 838 - buf += "}\n\n" 839 - bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n" 840 - 841 823 if re.search('sess_get_index\)\(', fo): 842 824 buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" 843 825 buf += "{\n" ··· 874 898 bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n" 875 899 876 900 if re.search('queue_tm_rsp\)\(', fo): 877 - buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" 901 + buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" 878 902 buf += "{\n" 879 - buf += " return 0;\n" 903 + buf += " return;\n" 880 904 buf += "}\n\n" 881 - bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" 905 + bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" 882 906 883 - if re.search('is_state_remove\)\(', fo): 884 - buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n" 907 + if re.search('aborted_task\)\(', fo): 908 + buf += 
"void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n" 885 909 buf += "{\n" 886 - buf += " return 0;\n" 910 + buf += " return;\n" 887 911 buf += "}\n\n" 888 - bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n" 889 - 912 + bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n" 890 913 891 914 ret = p.write(buf) 892 915 if ret: ··· 993 1018 tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name) 994 1019 tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name) 995 1020 996 - input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ") 1021 + input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ") 997 1022 if input == "yes" or input == "y": 998 1023 tcm_mod_add_kbuild(tcm_dir, fabric_mod_name) 999 1024 1000 - input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ") 1025 + input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ") 1001 1026 if input == "yes" or input == "y": 1002 1027 tcm_mod_add_kconfig(tcm_dir, fabric_mod_name) 1003 1028
+13 -2
Documentation/thermal/cpu-cooling-api.txt
··· 3 3 4 4 Written by Amit Daniel Kachhap <amit.kachhap@linaro.org> 5 5 6 - Updated: 12 May 2012 6 + Updated: 6 Jan 2015 7 7 8 8 Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) 9 9 ··· 25 25 26 26 clip_cpus: cpumask of cpus where the frequency constraints will happen. 27 27 28 - 1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 28 + 1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register( 29 + struct device_node *np, const struct cpumask *clip_cpus) 30 + 31 + This interface function registers the cpufreq cooling device with 32 + the name "thermal-cpufreq-%x" linking it with a device tree node, in 33 + order to bind it via the thermal DT code. This api can support multiple 34 + instances of cpufreq cooling devices. 35 + 36 + np: pointer to the cooling device device tree node 37 + clip_cpus: cpumask of cpus where the frequency constraints will happen. 38 + 39 + 1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 29 40 30 41 This interface function unregisters the "thermal-cpufreq-%x" cooling device. 31 42
+21 -11
MAINTAINERS
··· 724 724 F: drivers/char/apm-emulation.c 725 725 726 726 APPLE BCM5974 MULTITOUCH DRIVER 727 - M: Henrik Rydberg <rydberg@euromail.se> 727 + M: Henrik Rydberg <rydberg@bitmath.org> 728 728 L: linux-input@vger.kernel.org 729 - S: Maintained 729 + S: Odd fixes 730 730 F: drivers/input/mouse/bcm5974.c 731 731 732 732 APPLE SMC DRIVER 733 - M: Henrik Rydberg <rydberg@euromail.se> 733 + M: Henrik Rydberg <rydberg@bitmath.org> 734 734 L: lm-sensors@lm-sensors.org 735 - S: Maintained 735 + S: Odd fixes 736 736 F: drivers/hwmon/applesmc.c 737 737 738 738 APPLETALK NETWORK LAYER ··· 2259 2259 BTRFS FILE SYSTEM 2260 2260 M: Chris Mason <clm@fb.com> 2261 2261 M: Josef Bacik <jbacik@fb.com> 2262 + M: David Sterba <dsterba@suse.cz> 2262 2263 L: linux-btrfs@vger.kernel.org 2263 2264 W: http://btrfs.wiki.kernel.org/ 2264 2265 Q: http://patchwork.kernel.org/project/linux-btrfs/list/ ··· 3183 3182 Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ 3184 3183 S: Maintained 3185 3184 F: drivers/dma/ 3186 - F: include/linux/dma* 3185 + F: include/linux/dmaengine.h 3187 3186 F: Documentation/dmaengine/ 3188 3187 T: git git://git.infradead.org/users/vkoul/slave-dma.git 3189 3188 ··· 4749 4748 F: drivers/scsi/ipr.* 4750 4749 4751 4750 IBM Power Virtual Ethernet Device Driver 4752 - M: Santiago Leon <santil@linux.vnet.ibm.com> 4751 + M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com> 4753 4752 L: netdev@vger.kernel.org 4754 4753 S: Supported 4755 4754 F: drivers/net/ethernet/ibm/ibmveth.* ··· 4941 4940 F: include/linux/input/ 4942 4941 4943 4942 INPUT MULTITOUCH (MT) PROTOCOL 4944 - M: Henrik Rydberg <rydberg@euromail.se> 4943 + M: Henrik Rydberg <rydberg@bitmath.org> 4945 4944 L: linux-input@vger.kernel.org 4946 4945 T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git 4947 - S: Maintained 4946 + S: Odd fixes 4948 4947 F: Documentation/input/multi-touch-protocol.txt 4949 4948 F: drivers/input/input-mt.c 4950 4949 K: \b(ABS|SYN)_MT_ ··· 5279 5278 W: 
www.open-iscsi.org 5280 5279 Q: http://patchwork.kernel.org/project/linux-rdma/list/ 5281 5280 F: drivers/infiniband/ulp/iser/ 5281 + 5282 + ISCSI EXTENSIONS FOR RDMA (ISER) TARGET 5283 + M: Sagi Grimberg <sagig@mellanox.com> 5284 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master 5285 + L: linux-rdma@vger.kernel.org 5286 + L: target-devel@vger.kernel.org 5287 + S: Supported 5288 + W: http://www.linux-iscsi.org 5289 + F: drivers/infiniband/ulp/isert 5282 5290 5283 5291 ISDN SUBSYSTEM 5284 5292 M: Karsten Keil <isdn@linux-pingi.de> ··· 7747 7737 F: drivers/scsi/qla2xxx/ 7748 7738 7749 7739 QLOGIC QLA4XXX iSCSI DRIVER 7750 - M: Vikas Chaudhary <vikas.chaudhary@qlogic.com> 7751 - M: iscsi-driver@qlogic.com 7740 + M: QLogic-Storage-Upstream@qlogic.com 7752 7741 L: linux-scsi@vger.kernel.org 7753 7742 S: Supported 7754 7743 F: Documentation/scsi/LICENSE.qla4xxx ··· 9542 9533 TI BANDGAP AND THERMAL DRIVER 9543 9534 M: Eduardo Valentin <edubezval@gmail.com> 9544 9535 L: linux-pm@vger.kernel.org 9545 - S: Supported 9536 + L: linux-omap@vger.kernel.org 9537 + S: Maintained 9546 9538 F: drivers/thermal/ti-soc-thermal/ 9547 9539 9548 9540 TI CLOCK DRIVER
+2 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 19 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc2 4 + EXTRAVERSION = -rc5 5 5 NAME = Diseased Newt 6 6 7 7 # *DOCUMENTATION* ··· 391 391 # Needed to be compatible with the O= option 392 392 LINUXINCLUDE := \ 393 393 -I$(srctree)/arch/$(hdr-arch)/include \ 394 + -Iarch/$(hdr-arch)/include/generated/uapi \ 394 395 -Iarch/$(hdr-arch)/include/generated \ 395 396 $(if $(KBUILD_SRC), -I$(srctree)/include) \ 396 397 -Iinclude \
-24
arch/arm/boot/dts/armada-370-db.dts
··· 203 203 compatible = "linux,spdif-dir"; 204 204 }; 205 205 }; 206 - 207 - &pinctrl { 208 - /* 209 - * These pins might be muxed as I2S by 210 - * the bootloader, but it conflicts 211 - * with the real I2S pins that are 212 - * muxed using i2s_pins. We must mux 213 - * those pins to a function other than 214 - * I2S. 215 - */ 216 - pinctrl-0 = <&hog_pins1 &hog_pins2>; 217 - pinctrl-names = "default"; 218 - 219 - hog_pins1: hog-pins1 { 220 - marvell,pins = "mpp6", "mpp8", "mpp10", 221 - "mpp12", "mpp13"; 222 - marvell,function = "gpio"; 223 - }; 224 - 225 - hog_pins2: hog-pins2 { 226 - marvell,pins = "mpp5", "mpp7", "mpp9"; 227 - marvell,function = "gpo"; 228 - }; 229 - };
+2
arch/arm/boot/dts/at91sam9263.dtsi
··· 953 953 interrupts = <26 IRQ_TYPE_LEVEL_HIGH 3>; 954 954 pinctrl-names = "default"; 955 955 pinctrl-0 = <&pinctrl_fb>; 956 + clocks = <&lcd_clk>, <&lcd_clk>; 957 + clock-names = "lcdc_clk", "hclk"; 956 958 status = "disabled"; 957 959 }; 958 960
+2
arch/arm/boot/dts/berlin2q-marvell-dmp.dts
··· 65 65 }; 66 66 67 67 &sdhci2 { 68 + broken-cd; 69 + bus-width = <8>; 68 70 non-removable; 69 71 status = "okay"; 70 72 };
+32 -31
arch/arm/boot/dts/berlin2q.dtsi
··· 83 83 compatible = "mrvl,pxav3-mmc"; 84 84 reg = <0xab1000 0x200>; 85 85 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; 86 - clocks = <&chip CLKID_SDIO1XIN>; 86 + clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>; 87 + clock-names = "io", "core"; 87 88 status = "disabled"; 88 89 }; 89 90 ··· 349 348 interrupt-parent = <&gic>; 350 349 interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>; 351 350 }; 352 - 353 - gpio4: gpio@5000 { 354 - compatible = "snps,dw-apb-gpio"; 355 - reg = <0x5000 0x400>; 356 - #address-cells = <1>; 357 - #size-cells = <0>; 358 - 359 - porte: gpio-port@4 { 360 - compatible = "snps,dw-apb-gpio-port"; 361 - gpio-controller; 362 - #gpio-cells = <2>; 363 - snps,nr-gpios = <32>; 364 - reg = <0>; 365 - }; 366 - }; 367 - 368 - gpio5: gpio@c000 { 369 - compatible = "snps,dw-apb-gpio"; 370 - reg = <0xc000 0x400>; 371 - #address-cells = <1>; 372 - #size-cells = <0>; 373 - 374 - portf: gpio-port@5 { 375 - compatible = "snps,dw-apb-gpio-port"; 376 - gpio-controller; 377 - #gpio-cells = <2>; 378 - snps,nr-gpios = <32>; 379 - reg = <0>; 380 - }; 381 - }; 382 351 }; 383 352 384 353 chip: chip-control@ea0000 { ··· 437 466 ranges = <0 0xfc0000 0x10000>; 438 467 interrupt-parent = <&sic>; 439 468 469 + sm_gpio1: gpio@5000 { 470 + compatible = "snps,dw-apb-gpio"; 471 + reg = <0x5000 0x400>; 472 + #address-cells = <1>; 473 + #size-cells = <0>; 474 + 475 + portf: gpio-port@5 { 476 + compatible = "snps,dw-apb-gpio-port"; 477 + gpio-controller; 478 + #gpio-cells = <2>; 479 + snps,nr-gpios = <32>; 480 + reg = <0>; 481 + }; 482 + }; 483 + 440 484 i2c2: i2c@7000 { 441 485 compatible = "snps,designware-i2c"; 442 486 #address-cells = <1>; ··· 500 514 pinctrl-0 = <&uart1_pmux>; 501 515 pinctrl-names = "default"; 502 516 status = "disabled"; 517 + }; 518 + 519 + sm_gpio0: gpio@c000 { 520 + compatible = "snps,dw-apb-gpio"; 521 + reg = <0xc000 0x400>; 522 + #address-cells = <1>; 523 + #size-cells = <0>; 524 + 525 + porte: gpio-port@4 { 526 + compatible = "snps,dw-apb-gpio-port"; 
527 + gpio-controller; 528 + #gpio-cells = <2>; 529 + snps,nr-gpios = <32>; 530 + reg = <0>; 531 + }; 503 532 }; 504 533 505 534 sysctrl: pin-controller@d000 {
+5 -5
arch/arm/boot/dts/dra7-evm.dts
··· 499 499 }; 500 500 partition@5 { 501 501 label = "QSPI.u-boot-spl-os"; 502 - reg = <0x00140000 0x00010000>; 502 + reg = <0x00140000 0x00080000>; 503 503 }; 504 504 partition@6 { 505 505 label = "QSPI.u-boot-env"; 506 - reg = <0x00150000 0x00010000>; 506 + reg = <0x001c0000 0x00010000>; 507 507 }; 508 508 partition@7 { 509 509 label = "QSPI.u-boot-env.backup1"; 510 - reg = <0x00160000 0x0010000>; 510 + reg = <0x001d0000 0x0010000>; 511 511 }; 512 512 partition@8 { 513 513 label = "QSPI.kernel"; 514 - reg = <0x00170000 0x0800000>; 514 + reg = <0x001e0000 0x0800000>; 515 515 }; 516 516 partition@9 { 517 517 label = "QSPI.file-system"; 518 - reg = <0x00970000 0x01690000>; 518 + reg = <0x009e0000 0x01620000>; 519 519 }; 520 520 }; 521 521 };
+1 -1
arch/arm/boot/dts/exynos5250.dtsi
··· 736 736 737 737 dp_phy: video-phy@10040720 { 738 738 compatible = "samsung,exynos5250-dp-video-phy"; 739 - reg = <0x10040720 4>; 739 + samsung,pmu-syscon = <&pmu_system_controller>; 740 740 #phy-cells = <0>; 741 741 }; 742 742
+4
arch/arm/boot/dts/exynos5420-arndale-octa.dts
··· 372 372 &usbdrd_dwc3_1 { 373 373 dr_mode = "host"; 374 374 }; 375 + 376 + &cci { 377 + status = "disabled"; 378 + };
+3 -3
arch/arm/boot/dts/exynos5420.dtsi
··· 120 120 }; 121 121 }; 122 122 123 - cci@10d20000 { 123 + cci: cci@10d20000 { 124 124 compatible = "arm,cci-400"; 125 125 #address-cells = <1>; 126 126 #size-cells = <1>; ··· 503 503 }; 504 504 505 505 dp_phy: video-phy@10040728 { 506 - compatible = "samsung,exynos5250-dp-video-phy"; 507 - reg = <0x10040728 4>; 506 + compatible = "samsung,exynos5420-dp-video-phy"; 507 + samsung,pmu-syscon = <&pmu_system_controller>; 508 508 #phy-cells = <0>; 509 509 }; 510 510
+1 -1
arch/arm/boot/dts/imx25.dtsi
··· 162 162 #size-cells = <0>; 163 163 compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; 164 164 reg = <0x43fa4000 0x4000>; 165 - clocks = <&clks 62>, <&clks 62>; 165 + clocks = <&clks 78>, <&clks 78>; 166 166 clock-names = "ipg", "per"; 167 167 interrupts = <14>; 168 168 status = "disabled";
+5 -17
arch/arm/boot/dts/imx51-babbage.dts
··· 127 127 #address-cells = <1>; 128 128 #size-cells = <0>; 129 129 130 - reg_usbh1_vbus: regulator@0 { 131 - compatible = "regulator-fixed"; 132 - pinctrl-names = "default"; 133 - pinctrl-0 = <&pinctrl_usbh1reg>; 134 - reg = <0>; 135 - regulator-name = "usbh1_vbus"; 136 - regulator-min-microvolt = <5000000>; 137 - regulator-max-microvolt = <5000000>; 138 - gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>; 139 - enable-active-high; 140 - }; 141 - 142 - reg_usbotg_vbus: regulator@1 { 130 + reg_hub_reset: regulator@0 { 143 131 compatible = "regulator-fixed"; 144 132 pinctrl-names = "default"; 145 133 pinctrl-0 = <&pinctrl_usbotgreg>; 146 - reg = <1>; 147 - regulator-name = "usbotg_vbus"; 134 + reg = <0>; 135 + regulator-name = "hub_reset"; 148 136 regulator-min-microvolt = <5000000>; 149 137 regulator-max-microvolt = <5000000>; 150 138 gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>; ··· 164 176 reg = <0>; 165 177 clocks = <&clks IMX5_CLK_DUMMY>; 166 178 clock-names = "main_clk"; 179 + reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>; 167 180 }; 168 181 }; 169 182 }; ··· 408 419 &usbh1 { 409 420 pinctrl-names = "default"; 410 421 pinctrl-0 = <&pinctrl_usbh1>; 411 - vbus-supply = <&reg_usbh1_vbus>; 422 + vbus-supply = <&reg_hub_reset>; 412 423 fsl,usbphy = <&usbh1phy>; 413 424 phy_type = "ulpi"; 414 425 status = "okay"; ··· 418 429 dr_mode = "otg"; 419 430 disable-over-current; 420 431 phy_type = "utmi_wide"; 421 - vbus-supply = <&reg_usbotg_vbus>; 422 432 status = "okay"; 423 433 }; 424 434
+2 -2
arch/arm/boot/dts/imx6qdl.dtsi
··· 335 335 vpu: vpu@02040000 { 336 336 compatible = "cnm,coda960"; 337 337 reg = <0x02040000 0x3c000>; 338 - interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>, 339 - <0 12 IRQ_TYPE_LEVEL_HIGH>; 338 + interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>, 339 + <0 3 IRQ_TYPE_LEVEL_HIGH>; 340 340 interrupt-names = "bit", "jpeg"; 341 341 clocks = <&clks IMX6QDL_CLK_VPU_AXI>, 342 342 <&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
+15
arch/arm/boot/dts/imx6sx-sdb.dts
··· 159 159 pinctrl-0 = <&pinctrl_enet1>; 160 160 phy-supply = <&reg_enet_3v3>; 161 161 phy-mode = "rgmii"; 162 + phy-handle = <&ethphy1>; 162 163 status = "okay"; 164 + 165 + mdio { 166 + #address-cells = <1>; 167 + #size-cells = <0>; 168 + 169 + ethphy1: ethernet-phy@0 { 170 + reg = <0>; 171 + }; 172 + 173 + ethphy2: ethernet-phy@1 { 174 + reg = <1>; 175 + }; 176 + }; 163 177 }; 164 178 165 179 &fec2 { 166 180 pinctrl-names = "default"; 167 181 pinctrl-0 = <&pinctrl_enet2>; 168 182 phy-mode = "rgmii"; 183 + phy-handle = <&ethphy2>; 169 184 status = "okay"; 170 185 }; 171 186
+1
arch/arm/boot/dts/ls1021a.dtsi
··· 142 142 scfg: scfg@1570000 { 143 143 compatible = "fsl,ls1021a-scfg", "syscon"; 144 144 reg = <0x0 0x1570000 0x0 0x10000>; 145 + big-endian; 145 146 }; 146 147 147 148 clockgen: clocking@1ee1000 {
+1 -3
arch/arm/boot/dts/omap3-n900.dts
··· 700 700 }; 701 701 }; 702 702 703 + /* Ethernet is on some early development boards and qemu */ 703 704 ethernet@gpmc { 704 705 compatible = "smsc,lan91c94"; 705 - 706 - status = "disabled"; 707 - 708 706 interrupt-parent = <&gpio2>; 709 707 interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ 710 708 reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */
+30
arch/arm/boot/dts/rk3288-evb.dtsi
··· 155 155 }; 156 156 157 157 &pinctrl { 158 + pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma { 159 + drive-strength = <8>; 160 + }; 161 + 162 + pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma { 163 + bias-pull-up; 164 + drive-strength = <8>; 165 + }; 166 + 158 167 backlight { 159 168 bl_en: bl-en { 160 169 rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>; ··· 179 170 pmic { 180 171 pmic_int: pmic-int { 181 172 rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>; 173 + }; 174 + }; 175 + 176 + sdmmc { 177 + /* 178 + * Default drive strength isn't enough to achieve even 179 + * high-speed mode on EVB board so bump up to 8ma. 180 + */ 181 + sdmmc_bus4: sdmmc-bus4 { 182 + rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>, 183 + <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>, 184 + <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>, 185 + <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>; 186 + }; 187 + 188 + sdmmc_clk: sdmmc-clk { 189 + rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>; 190 + }; 191 + 192 + sdmmc_cmd: sdmmc-cmd { 193 + rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>; 182 194 }; 183 195 }; 184 196
+1 -1
arch/arm/boot/dts/sama5d3xmb.dtsi
··· 176 176 "Headphone Jack", "HPOUTR", 177 177 "IN2L", "Line In Jack", 178 178 "IN2R", "Line In Jack", 179 - "MICBIAS", "IN1L", 179 + "Mic", "MICBIAS", 180 180 "IN1L", "Mic"; 181 181 182 182 atmel,ssc-controller = <&ssc0>;
+1 -1
arch/arm/boot/dts/sama5d4.dtsi
··· 1008 1008 1009 1009 pit: timer@fc068630 { 1010 1010 compatible = "atmel,at91sam9260-pit"; 1011 - reg = <0xfc068630 0xf>; 1011 + reg = <0xfc068630 0x10>; 1012 1012 interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>; 1013 1013 clocks = <&h32ck>; 1014 1014 };
+4 -4
arch/arm/boot/dts/ste-nomadik-nhk15.dts
··· 25 25 stmpe2401_1 { 26 26 stmpe2401_1_nhk_mode: stmpe2401_1_nhk { 27 27 nhk_cfg1 { 28 - ste,pins = "GPIO76_B20"; // IRQ line 28 + pins = "GPIO76_B20"; // IRQ line 29 29 ste,input = <0>; 30 30 }; 31 31 nhk_cfg2 { 32 - ste,pins = "GPIO77_B8"; // reset line 32 + pins = "GPIO77_B8"; // reset line 33 33 ste,output = <1>; 34 34 }; 35 35 }; ··· 37 37 stmpe2401_2 { 38 38 stmpe2401_2_nhk_mode: stmpe2401_2_nhk { 39 39 nhk_cfg1 { 40 - ste,pins = "GPIO78_A8"; // IRQ line 40 + pins = "GPIO78_A8"; // IRQ line 41 41 ste,input = <0>; 42 42 }; 43 43 nhk_cfg2 { 44 - ste,pins = "GPIO79_C9"; // reset line 44 + pins = "GPIO79_C9"; // reset line 45 45 ste,output = <1>; 46 46 }; 47 47 };
+15
arch/arm/boot/dts/vf610-twr.dts
··· 129 129 130 130 &fec0 { 131 131 phy-mode = "rmii"; 132 + phy-handle = <&ethphy0>; 132 133 pinctrl-names = "default"; 133 134 pinctrl-0 = <&pinctrl_fec0>; 134 135 status = "okay"; 136 + 137 + mdio { 138 + #address-cells = <1>; 139 + #size-cells = <0>; 140 + 141 + ethphy0: ethernet-phy@0 { 142 + reg = <0>; 143 + }; 144 + 145 + ethphy1: ethernet-phy@1 { 146 + reg = <1>; 147 + }; 148 + }; 135 149 }; 136 150 137 151 &fec1 { 138 152 phy-mode = "rmii"; 153 + phy-handle = <&ethphy1>; 139 154 pinctrl-names = "default"; 140 155 pinctrl-0 = <&pinctrl_fec1>; 141 156 status = "okay";
+17 -1
arch/arm/configs/exynos_defconfig
··· 84 84 CONFIG_POWER_SUPPLY=y 85 85 CONFIG_BATTERY_SBS=y 86 86 CONFIG_CHARGER_TPS65090=y 87 - # CONFIG_HWMON is not set 87 + CONFIG_HWMON=y 88 + CONFIG_SENSORS_LM90=y 88 89 CONFIG_THERMAL=y 89 90 CONFIG_EXYNOS_THERMAL=y 90 91 CONFIG_EXYNOS_THERMAL_CORE=y ··· 110 109 CONFIG_REGULATOR_S2MPS11=y 111 110 CONFIG_REGULATOR_S5M8767=y 112 111 CONFIG_REGULATOR_TPS65090=y 112 + CONFIG_DRM=y 113 + CONFIG_DRM_BRIDGE=y 114 + CONFIG_DRM_PTN3460=y 115 + CONFIG_DRM_PS8622=y 116 + CONFIG_DRM_EXYNOS=y 117 + CONFIG_DRM_EXYNOS_FIMD=y 118 + CONFIG_DRM_EXYNOS_DP=y 119 + CONFIG_DRM_PANEL=y 120 + CONFIG_DRM_PANEL_SIMPLE=y 113 121 CONFIG_FB=y 114 122 CONFIG_FB_MODE_HELPERS=y 115 123 CONFIG_FB_SIMPLE=y 116 124 CONFIG_EXYNOS_VIDEO=y 117 125 CONFIG_EXYNOS_MIPI_DSI=y 126 + CONFIG_BACKLIGHT_LCD_SUPPORT=y 127 + CONFIG_LCD_CLASS_DEVICE=y 128 + CONFIG_LCD_PLATFORM=y 129 + CONFIG_BACKLIGHT_CLASS_DEVICE=y 130 + CONFIG_BACKLIGHT_GENERIC=y 131 + CONFIG_BACKLIGHT_PWM=y 118 132 CONFIG_FRAMEBUFFER_CONSOLE=y 119 133 CONFIG_FONTS=y 120 134 CONFIG_FONT_7x14=y
+1
arch/arm/configs/multi_v7_defconfig
··· 338 338 CONFIG_USB_XHCI_HCD=y 339 339 CONFIG_USB_XHCI_MVEBU=y 340 340 CONFIG_USB_EHCI_HCD=y 341 + CONFIG_USB_EHCI_EXYNOS=y 341 342 CONFIG_USB_EHCI_TEGRA=y 342 343 CONFIG_USB_EHCI_HCD_STI=y 343 344 CONFIG_USB_EHCI_HCD_PLATFORM=y
+1 -1
arch/arm/configs/omap2plus_defconfig
··· 68 68 CONFIG_CPU_FREQ_GOV_POWERSAVE=y 69 69 CONFIG_CPU_FREQ_GOV_USERSPACE=y 70 70 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y 71 - CONFIG_GENERIC_CPUFREQ_CPU0=y 71 + CONFIG_CPUFREQ_DT=y 72 72 # CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set 73 73 CONFIG_CPU_IDLE=y 74 74 CONFIG_BINFMT_MISC=y
+1
arch/arm/include/uapi/asm/unistd.h
··· 413 413 #define __NR_getrandom (__NR_SYSCALL_BASE+384) 414 414 #define __NR_memfd_create (__NR_SYSCALL_BASE+385) 415 415 #define __NR_bpf (__NR_SYSCALL_BASE+386) 416 + #define __NR_execveat (__NR_SYSCALL_BASE+387) 416 417 417 418 /* 418 419 * The following SWIs are ARM private.
+1
arch/arm/kernel/calls.S
··· 396 396 CALL(sys_getrandom) 397 397 /* 385 */ CALL(sys_memfd_create) 398 398 CALL(sys_bpf) 399 + CALL(sys_execveat) 399 400 #ifndef syscalls_counted 400 401 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 401 402 #define syscalls_counted
+8
arch/arm/kernel/perf_regs.c
··· 28 28 { 29 29 return PERF_SAMPLE_REGS_ABI_32; 30 30 } 31 + 32 + void perf_get_regs_user(struct perf_regs *regs_user, 33 + struct pt_regs *regs, 34 + struct pt_regs *regs_user_copy) 35 + { 36 + regs_user->regs = task_pt_regs(current); 37 + regs_user->abi = perf_reg_abi(current); 38 + }
+9
arch/arm/kernel/setup.c
··· 1046 1046 seq_printf(m, "model name\t: %s rev %d (%s)\n", 1047 1047 cpu_name, cpuid & 15, elf_platform); 1048 1048 1049 + #if defined(CONFIG_SMP) 1050 + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", 1051 + per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), 1052 + (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); 1053 + #else 1054 + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", 1055 + loops_per_jiffy / (500000/HZ), 1056 + (loops_per_jiffy / (5000/HZ)) % 100); 1057 + #endif 1049 1058 /* dump out the processor features */ 1050 1059 seq_puts(m, "Features\t: "); 1051 1060
+12
arch/arm/kernel/smp.c
··· 387 387 388 388 void __init smp_cpus_done(unsigned int max_cpus) 389 389 { 390 + int cpu; 391 + unsigned long bogosum = 0; 392 + 393 + for_each_online_cpu(cpu) 394 + bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; 395 + 396 + printk(KERN_INFO "SMP: Total of %d processors activated " 397 + "(%lu.%02lu BogoMIPS).\n", 398 + num_online_cpus(), 399 + bogosum / (500000/HZ), 400 + (bogosum / (5000/HZ)) % 100); 401 + 390 402 hyp_mode_check(); 391 403 } 392 404
+18
arch/arm/mach-at91/board-dt-sama5.c
··· 17 17 #include <linux/of_platform.h> 18 18 #include <linux/phy.h> 19 19 #include <linux/clk-provider.h> 20 + #include <linux/phy.h> 20 21 21 22 #include <asm/setup.h> 22 23 #include <asm/irq.h> ··· 27 26 28 27 #include "generic.h" 29 28 29 + static int ksz8081_phy_fixup(struct phy_device *phy) 30 + { 31 + int value; 32 + 33 + value = phy_read(phy, 0x16); 34 + value &= ~0x20; 35 + phy_write(phy, 0x16, value); 36 + 37 + return 0; 38 + } 39 + 30 40 static void __init sama5_dt_device_init(void) 31 41 { 42 + if (of_machine_is_compatible("atmel,sama5d4ek") && 43 + IS_ENABLED(CONFIG_PHYLIB)) { 44 + phy_register_fixup_for_id("fc028000.etherne:00", 45 + ksz8081_phy_fixup); 46 + } 47 + 32 48 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 33 49 } 34 50
+1 -1
arch/arm/mach-imx/clk-imx6q.c
··· 144 144 post_div_table[1].div = 1; 145 145 post_div_table[2].div = 1; 146 146 video_div_table[1].div = 1; 147 - video_div_table[2].div = 1; 147 + video_div_table[3].div = 1; 148 148 } 149 149 150 150 clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+3
arch/arm/mach-imx/clk-imx6sx.c
··· 558 558 clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); 559 559 clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); 560 560 561 + clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]); 562 + clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]); 563 + 561 564 /* Set initial power mode */ 562 565 imx6q_set_lpm(WAIT_CLOCKED); 563 566 }
+18
arch/arm/mach-omap2/board-generic.c
··· 77 77 #endif 78 78 79 79 #ifdef CONFIG_ARCH_OMAP3 80 + /* Some boards need board name for legacy userspace in /proc/cpuinfo */ 81 + static const char *const n900_boards_compat[] __initconst = { 82 + "nokia,omap3-n900", 83 + NULL, 84 + }; 85 + 86 + DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board") 87 + .reserve = omap_reserve, 88 + .map_io = omap3_map_io, 89 + .init_early = omap3430_init_early, 90 + .init_machine = omap_generic_init, 91 + .init_late = omap3_init_late, 92 + .init_time = omap3_sync32k_timer_init, 93 + .dt_compat = n900_boards_compat, 94 + .restart = omap3xxx_restart, 95 + MACHINE_END 96 + 97 + /* Generic omap3 boards, most boards can use these */ 80 98 static const char *const omap3_boards_compat[] __initconst = { 81 99 "ti,omap3430", 82 100 "ti,omap3",
+1
arch/arm/mach-omap2/common.h
··· 249 249 extern struct smp_operations omap4_smp_ops; 250 250 251 251 extern void omap5_secondary_startup(void); 252 + extern void omap5_secondary_hyp_startup(void); 252 253 #endif 253 254 254 255 #if defined(CONFIG_SMP) && defined(CONFIG_PM)
+4
arch/arm/mach-omap2/control.h
··· 286 286 #define OMAP5XXX_CONTROL_STATUS 0x134 287 287 #define OMAP5_DEVICETYPE_MASK (0x7 << 6) 288 288 289 + /* DRA7XX CONTROL CORE BOOTSTRAP */ 290 + #define DRA7_CTRL_CORE_BOOTSTRAP 0x6c4 291 + #define DRA7_SPEEDSELECT_MASK (0x3 << 8) 292 + 289 293 /* 290 294 * REVISIT: This list of registers is not comprehensive - there are more 291 295 * that should be added.
+21
arch/arm/mach-omap2/omap-headsmp.S
··· 22 22 23 23 /* Physical address needed since MMU not enabled yet on secondary core */ 24 24 #define AUX_CORE_BOOT0_PA 0x48281800 25 + #define API_HYP_ENTRY 0x102 25 26 26 27 /* 27 28 * OMAP5 specific entry point for secondary CPU to jump from ROM ··· 41 40 bne wait 42 41 b secondary_startup 43 42 ENDPROC(omap5_secondary_startup) 43 + /* 44 + * Same as omap5_secondary_startup except we call into the ROM to 45 + * enable HYP mode first. This is called instead of 46 + * omap5_secondary_startup if the primary CPU was put into HYP mode by 47 + * the boot loader. 48 + */ 49 + ENTRY(omap5_secondary_hyp_startup) 50 + wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0 51 + ldr r0, [r2] 52 + mov r0, r0, lsr #5 53 + mrc p15, 0, r4, c0, c0, 5 54 + and r4, r4, #0x0f 55 + cmp r0, r4 56 + bne wait_2 57 + ldr r12, =API_HYP_ENTRY 58 + adr r0, hyp_boot 59 + smc #0 60 + hyp_boot: 61 + b secondary_startup 62 + ENDPROC(omap5_secondary_hyp_startup) 44 63 /* 45 64 * OMAP4 specific entry point for secondary CPU to jump from ROM 46 65 * code. This routine also provides a holding flag into which
+11 -2
arch/arm/mach-omap2/omap-smp.c
··· 22 22 #include <linux/irqchip/arm-gic.h> 23 23 24 24 #include <asm/smp_scu.h> 25 + #include <asm/virt.h> 25 26 26 27 #include "omap-secure.h" 27 28 #include "omap-wakeupgen.h" ··· 228 227 if (omap_secure_apis_support()) 229 228 omap_auxcoreboot_addr(virt_to_phys(startup_addr)); 230 229 else 231 - writel_relaxed(virt_to_phys(omap5_secondary_startup), 232 - base + OMAP_AUX_CORE_BOOT_1); 230 + /* 231 + * If the boot CPU is in HYP mode then start secondary 232 + * CPU in HYP mode as well. 233 + */ 234 + if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) 235 + writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup), 236 + base + OMAP_AUX_CORE_BOOT_1); 237 + else 238 + writel_relaxed(virt_to_phys(omap5_secondary_startup), 239 + base + OMAP_AUX_CORE_BOOT_1); 233 240 234 241 } 235 242
+38 -6
arch/arm/mach-omap2/timer.c
··· 54 54 55 55 #include "soc.h" 56 56 #include "common.h" 57 + #include "control.h" 57 58 #include "powerdomain.h" 58 59 #include "omap-secure.h" 59 60 ··· 497 496 void __iomem *base; 498 497 static struct clk *sys_clk; 499 498 unsigned long rate; 500 - unsigned int reg, num, den; 499 + unsigned int reg; 500 + unsigned long long num, den; 501 501 502 502 base = ioremap(REALTIME_COUNTER_BASE, SZ_32); 503 503 if (!base) { ··· 513 511 } 514 512 515 513 rate = clk_get_rate(sys_clk); 514 + 515 + if (soc_is_dra7xx()) { 516 + /* 517 + * Errata i856 says the 32.768KHz crystal does not start at 518 + * power on, so the CPU falls back to an emulated 32KHz clock 519 + * based on sysclk / 610 instead. This causes the master counter 520 + * frequency to not be 6.144MHz but at sysclk / 610 * 375 / 2 521 + * (OR sysclk * 75 / 244) 522 + * 523 + * This affects at least the DRA7/AM572x 1.0, 1.1 revisions. 524 + * Of course any board built without a populated 32.768KHz 525 + * crystal would also need this fix even if the CPU is fixed 526 + * later. 527 + * 528 + * Either case can be detected by using the two speedselect bits 529 + * If they are not 0, then the 32.768KHz clock driving the 530 + * coarse counter that corrects the fine counter every time it 531 + * ticks is actually rate/610 rather than 32.768KHz and we 532 + * should compensate to avoid the 570ppm (at 20MHz, much worse 533 + * at other rates) too fast system time. 
534 + */ 535 + reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP); 536 + if (reg & DRA7_SPEEDSELECT_MASK) { 537 + num = 75; 538 + den = 244; 539 + goto sysclk1_based; 540 + } 541 + } 542 + 516 543 /* Numerator/denumerator values refer TRM Realtime Counter section */ 517 544 switch (rate) { 518 - case 1200000: 545 + case 12000000: 519 546 num = 64; 520 547 den = 125; 521 548 break; 522 - case 1300000: 549 + case 13000000: 523 550 num = 768; 524 551 den = 1625; 525 552 break; ··· 560 529 num = 192; 561 530 den = 625; 562 531 break; 563 - case 2600000: 532 + case 26000000: 564 533 num = 384; 565 534 den = 1625; 566 535 break; 567 - case 2700000: 536 + case 27000000: 568 537 num = 256; 569 538 den = 1125; 570 539 break; ··· 576 545 break; 577 546 } 578 547 548 + sysclk1_based: 579 549 /* Program numerator and denumerator registers */ 580 550 reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) & 581 551 NUMERATOR_DENUMERATOR_MASK; ··· 588 556 reg |= den; 589 557 writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET); 590 558 591 - arch_timer_freq = (rate / den) * num; 559 + arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den); 592 560 set_cntfreq(); 593 561 594 562 iounmap(base);
+27
arch/arm/mach-rockchip/rockchip.c
··· 19 19 #include <linux/init.h> 20 20 #include <linux/of_platform.h> 21 21 #include <linux/irqchip.h> 22 + #include <linux/clk-provider.h> 23 + #include <linux/clocksource.h> 24 + #include <linux/mfd/syscon.h> 25 + #include <linux/regmap.h> 22 26 #include <asm/mach/arch.h> 23 27 #include <asm/mach/map.h> 24 28 #include <asm/hardware/cache-l2x0.h> 25 29 #include "core.h" 30 + 31 + #define RK3288_GRF_SOC_CON0 0x244 32 + 33 + static void __init rockchip_timer_init(void) 34 + { 35 + if (of_machine_is_compatible("rockchip,rk3288")) { 36 + struct regmap *grf; 37 + 38 + /* 39 + * Disable auto jtag/sdmmc switching that causes issues 40 + * with the mmc controllers making them unreliable 41 + */ 42 + grf = syscon_regmap_lookup_by_compatible("rockchip,rk3288-grf"); 43 + if (!IS_ERR(grf)) 44 + regmap_write(grf, RK3288_GRF_SOC_CON0, 0x10000000); 45 + else 46 + pr_err("rockchip: could not get grf syscon\n"); 47 + } 48 + 49 + of_clk_init(NULL); 50 + clocksource_of_init(); 51 + } 26 52 27 53 static void __init rockchip_dt_init(void) 28 54 { ··· 68 42 DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)") 69 43 .l2c_aux_val = 0, 70 44 .l2c_aux_mask = ~0, 45 + .init_time = rockchip_timer_init, 71 46 .dt_compat = rockchip_board_dt_compat, 72 47 .init_machine = rockchip_dt_init, 73 48 MACHINE_END
+7
arch/arm/mach-shmobile/setup-r8a7740.c
··· 800 800 void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10); 801 801 void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4); 802 802 803 + #ifdef CONFIG_ARCH_SHMOBILE_LEGACY 804 + void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000); 805 + void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000); 806 + 807 + gic_init(0, 29, gic_dist_base, gic_cpu_base); 808 + #else 803 809 irqchip_init(); 810 + #endif 804 811 805 812 /* route signals to GIC */ 806 813 iowrite32(0x0, pfc_inta_ctrl);
+3
arch/arm/mach-shmobile/setup-sh73a0.c
··· 595 595 596 596 static struct renesas_intc_irqpin_config irqpin0_platform_data = { 597 597 .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */ 598 + .control_parent = true, 598 599 }; 599 600 600 601 static struct resource irqpin0_resources[] = { ··· 657 656 658 657 static struct renesas_intc_irqpin_config irqpin2_platform_data = { 659 658 .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */ 659 + .control_parent = true, 660 660 }; 661 661 662 662 static struct resource irqpin2_resources[] = { ··· 688 686 689 687 static struct renesas_intc_irqpin_config irqpin3_platform_data = { 690 688 .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */ 689 + .control_parent = true, 691 690 }; 692 691 693 692 static struct resource irqpin3_resources[] = {
+2 -7
arch/arm/mm/dump.c
··· 220 220 static const char units[] = "KMGTPE"; 221 221 u64 prot = val & pg_level[level].mask; 222 222 223 - if (addr < USER_PGTABLES_CEILING) 224 - return; 225 - 226 223 if (!st->level) { 227 224 st->level = level; 228 225 st->current_prot = prot; ··· 305 308 pgd_t *pgd = swapper_pg_dir; 306 309 struct pg_state st; 307 310 unsigned long addr; 308 - unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE; 311 + unsigned i; 309 312 310 313 memset(&st, 0, sizeof(st)); 311 314 st.seq = m; 312 315 st.marker = address_markers; 313 316 314 - pgd += pgdoff; 315 - 316 - for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) { 317 + for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { 317 318 addr = i * PGDIR_SIZE; 318 319 if (!pgd_none(*pgd)) { 319 320 walk_pud(&st, pgd, addr);
+2 -2
arch/arm/mm/init.c
··· 658 658 .start = (unsigned long)_stext, 659 659 .end = (unsigned long)__init_begin, 660 660 #ifdef CONFIG_ARM_LPAE 661 - .mask = ~PMD_SECT_RDONLY, 662 - .prot = PMD_SECT_RDONLY, 661 + .mask = ~L_PMD_SECT_RDONLY, 662 + .prot = L_PMD_SECT_RDONLY, 663 663 #else 664 664 .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), 665 665 .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+2 -2
arch/arm/mm/mmu.c
··· 1329 1329 static void __init map_lowmem(void) 1330 1330 { 1331 1331 struct memblock_region *reg; 1332 - unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 1333 - unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1332 + phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 1333 + phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1334 1334 1335 1335 /* Map all the lowmem memory banks. */ 1336 1336 for_each_memblock(memory, reg) {
+1
arch/arm64/include/asm/arch_timer.h
··· 21 21 22 22 #include <asm/barrier.h> 23 23 24 + #include <linux/bug.h> 24 25 #include <linux/init.h> 25 26 #include <linux/types.h> 26 27
+5
arch/arm64/include/asm/cpu.h
··· 39 39 u64 reg_id_aa64pfr0; 40 40 u64 reg_id_aa64pfr1; 41 41 42 + u32 reg_id_dfr0; 42 43 u32 reg_id_isar0; 43 44 u32 reg_id_isar1; 44 45 u32 reg_id_isar2; ··· 52 51 u32 reg_id_mmfr3; 53 52 u32 reg_id_pfr0; 54 53 u32 reg_id_pfr1; 54 + 55 + u32 reg_mvfr0; 56 + u32 reg_mvfr1; 57 + u32 reg_mvfr2; 55 58 }; 56 59 57 60 DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+2
arch/arm64/include/asm/kvm_emulate.h
··· 41 41 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) 42 42 { 43 43 vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; 44 + if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) 45 + vcpu->arch.hcr_el2 &= ~HCR_RW; 44 46 } 45 47 46 48 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+1 -3
arch/arm64/include/asm/processor.h
··· 31 31 32 32 #include <asm/fpsimd.h> 33 33 #include <asm/hw_breakpoint.h> 34 + #include <asm/pgtable-hwdef.h> 34 35 #include <asm/ptrace.h> 35 36 #include <asm/types.h> 36 37 ··· 123 122 124 123 /* Free all resources held by a thread. */ 125 124 extern void release_thread(struct task_struct *); 126 - 127 - /* Prepare to copy thread state - unlazy all lazy status */ 128 - #define prepare_to_copy(tsk) do { } while (0) 129 125 130 126 unsigned long get_wchan(struct task_struct *p); 131 127
+1 -1
arch/arm64/include/asm/unistd.h
··· 44 44 #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) 45 45 #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) 46 46 47 - #define __NR_compat_syscalls 386 47 + #define __NR_compat_syscalls 388 48 48 #endif 49 49 50 50 #define __ARCH_WANT_SYS_CLONE
+2
arch/arm64/include/asm/unistd32.h
··· 795 795 __SYSCALL(__NR_memfd_create, sys_memfd_create) 796 796 #define __NR_bpf 386 797 797 __SYSCALL(__NR_bpf, sys_bpf) 798 + #define __NR_execveat 387 799 + __SYSCALL(__NR_execveat, compat_sys_execveat)
+10
arch/arm64/kernel/cpuinfo.c
··· 147 147 * If we have AArch32, we care about 32-bit features for compat. These 148 148 * registers should be RES0 otherwise. 149 149 */ 150 + diff |= CHECK(id_dfr0, boot, cur, cpu); 150 151 diff |= CHECK(id_isar0, boot, cur, cpu); 151 152 diff |= CHECK(id_isar1, boot, cur, cpu); 152 153 diff |= CHECK(id_isar2, boot, cur, cpu); ··· 165 164 diff |= CHECK(id_mmfr3, boot, cur, cpu); 166 165 diff |= CHECK(id_pfr0, boot, cur, cpu); 167 166 diff |= CHECK(id_pfr1, boot, cur, cpu); 167 + 168 + diff |= CHECK(mvfr0, boot, cur, cpu); 169 + diff |= CHECK(mvfr1, boot, cur, cpu); 170 + diff |= CHECK(mvfr2, boot, cur, cpu); 168 171 169 172 /* 170 173 * Mismatched CPU features are a recipe for disaster. Don't even ··· 194 189 info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); 195 190 info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); 196 191 192 + info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1); 197 193 info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); 198 194 info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); 199 195 info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); ··· 207 201 info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); 208 202 info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); 209 203 info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); 204 + 205 + info->reg_mvfr0 = read_cpuid(MVFR0_EL1); 206 + info->reg_mvfr1 = read_cpuid(MVFR1_EL1); 207 + info->reg_mvfr2 = read_cpuid(MVFR2_EL1); 210 208 211 209 cpuinfo_detect_icache_policy(info); 212 210
+1 -1
arch/arm64/kernel/efi.c
··· 326 326 327 327 /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ 328 328 efi_setup_idmap(); 329 + early_memunmap(memmap.map, memmap.map_end - memmap.map); 329 330 } 330 331 331 332 static int __init remap_region(efi_memory_desc_t *md, void **new) ··· 381 380 } 382 381 383 382 mapsize = memmap.map_end - memmap.map; 384 - early_memunmap(memmap.map, mapsize); 385 383 386 384 if (efi_runtime_disabled()) { 387 385 pr_info("EFI runtime services will be disabled.\n");
+1
arch/arm64/kernel/module.c
··· 25 25 #include <linux/mm.h> 26 26 #include <linux/moduleloader.h> 27 27 #include <linux/vmalloc.h> 28 + #include <asm/alternative.h> 28 29 #include <asm/insn.h> 29 30 #include <asm/sections.h> 30 31
+8
arch/arm64/kernel/perf_regs.c
··· 50 50 else 51 51 return PERF_SAMPLE_REGS_ABI_64; 52 52 } 53 + 54 + void perf_get_regs_user(struct perf_regs *regs_user, 55 + struct pt_regs *regs, 56 + struct pt_regs *regs_user_copy) 57 + { 58 + regs_user->regs = task_pt_regs(current); 59 + regs_user->abi = perf_reg_abi(current); 60 + }
+1
arch/arm64/kernel/setup.c
··· 402 402 request_standard_resources(); 403 403 404 404 efi_idmap_init(); 405 + early_ioremap_reset(); 405 406 406 407 unflatten_device_tree(); 407 408
+1
arch/arm64/kernel/smp_spin_table.c
··· 25 25 #include <asm/cacheflush.h> 26 26 #include <asm/cpu_ops.h> 27 27 #include <asm/cputype.h> 28 + #include <asm/io.h> 28 29 #include <asm/smp_plat.h> 29 30 30 31 extern void secondary_holding_pen(void);
+1
arch/arm64/kvm/hyp.S
··· 1014 1014 * Instead, we invalidate Stage-2 for this IPA, and the 1015 1015 * whole of Stage-1. Weep... 1016 1016 */ 1017 + lsr x1, x1, #12 1017 1018 tlbi ipas2e1is, x1 1018 1019 /* 1019 1020 * We have to ensure completion of the invalidation at Stage-2,
-1
arch/arm64/kvm/reset.c
··· 90 90 if (!cpu_has_32bit_el1()) 91 91 return -EINVAL; 92 92 cpu_reset = &default_regs_reset32; 93 - vcpu->arch.hcr_el2 &= ~HCR_RW; 94 93 } else { 95 94 cpu_reset = &default_regs_reset; 96 95 }
+1 -7
arch/arm64/mm/init.c
··· 335 335 336 336 void free_initrd_mem(unsigned long start, unsigned long end) 337 337 { 338 - if (!keep_initrd) { 339 - if (start == initrd_start) 340 - start = round_down(start, PAGE_SIZE); 341 - if (end == initrd_end) 342 - end = round_up(end, PAGE_SIZE); 343 - 338 + if (!keep_initrd) 344 339 free_reserved_area((void *)start, (void *)end, 0, "initrd"); 345 - } 346 340 } 347 341 348 342 static int __init keepinitrd_setup(char *__unused)
+1
arch/blackfin/mach-bf533/boards/stamp.c
··· 7 7 */ 8 8 9 9 #include <linux/device.h> 10 + #include <linux/delay.h> 10 11 #include <linux/platform_device.h> 11 12 #include <linux/mtd/mtd.h> 12 13 #include <linux/mtd/partitions.h>
+1 -1
arch/ia64/include/asm/unistd.h
··· 11 11 12 12 13 13 14 - #define NR_syscalls 318 /* length of syscall table */ 14 + #define NR_syscalls 319 /* length of syscall table */ 15 15 16 16 /* 17 17 * The following defines stop scripts/checksyscalls.sh from complaining about
+1
arch/ia64/include/uapi/asm/unistd.h
··· 331 331 #define __NR_getrandom 1339 332 332 #define __NR_memfd_create 1340 333 333 #define __NR_bpf 1341 334 + #define __NR_execveat 1342 334 335 335 336 #endif /* _UAPI_ASM_IA64_UNISTD_H */
+4 -5
arch/ia64/kernel/acpi.c
··· 893 893 } 894 894 895 895 /* wrapper to silence section mismatch warning */ 896 - int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) 896 + int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) 897 897 { 898 898 return _acpi_map_lsapic(handle, physid, pcpu); 899 899 } 900 - EXPORT_SYMBOL(acpi_map_lsapic); 900 + EXPORT_SYMBOL(acpi_map_cpu); 901 901 902 - int acpi_unmap_lsapic(int cpu) 902 + int acpi_unmap_cpu(int cpu) 903 903 { 904 904 ia64_cpu_to_sapicid[cpu] = -1; 905 905 set_cpu_present(cpu, false); ··· 910 910 911 911 return (0); 912 912 } 913 - 914 - EXPORT_SYMBOL(acpi_unmap_lsapic); 913 + EXPORT_SYMBOL(acpi_unmap_cpu); 915 914 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 916 915 917 916 #ifdef CONFIG_ACPI_NUMA
+1
arch/ia64/kernel/entry.S
··· 1779 1779 data8 sys_getrandom 1780 1780 data8 sys_memfd_create // 1340 1781 1781 data8 sys_bpf 1782 + data8 sys_execveat 1782 1783 1783 1784 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1784 1785 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+1 -1
arch/m68k/include/asm/unistd.h
··· 4 4 #include <uapi/asm/unistd.h> 5 5 6 6 7 - #define NR_syscalls 355 7 + #define NR_syscalls 356 8 8 9 9 #define __ARCH_WANT_OLD_READDIR 10 10 #define __ARCH_WANT_OLD_STAT
+1
arch/m68k/include/uapi/asm/unistd.h
··· 360 360 #define __NR_getrandom 352 361 361 #define __NR_memfd_create 353 362 362 #define __NR_bpf 354 363 + #define __NR_execveat 355 363 364 364 365 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
+1
arch/m68k/kernel/syscalltable.S
··· 375 375 .long sys_getrandom 376 376 .long sys_memfd_create 377 377 .long sys_bpf 378 + .long sys_execveat /* 355 */ 378 379
+1
arch/nios2/kernel/cpuinfo.c
··· 72 72 cpuinfo.has_div = fcpu_has(cpu, "altr,has-div"); 73 73 cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul"); 74 74 cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx"); 75 + cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu"); 75 76 76 77 if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div) 77 78 err_cpu("DIV");
+2 -18
arch/nios2/kernel/entry.S
··· 365 365 GET_THREAD_INFO r1 366 366 ldw r4, TI_PREEMPT_COUNT(r1) 367 367 bne r4, r0, restore_all 368 - 369 - need_resched: 370 368 ldw r4, TI_FLAGS(r1) /* ? Need resched set */ 371 369 BTBZ r10, r4, TIF_NEED_RESCHED, restore_all 372 370 ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */ 373 371 andi r10, r4, ESTATUS_EPIE 374 372 beq r10, r0, restore_all 375 - movia r4, PREEMPT_ACTIVE 376 - stw r4, TI_PREEMPT_COUNT(r1) 377 - rdctl r10, status /* enable intrs again */ 378 - ori r10, r10 ,STATUS_PIE 379 - wrctl status, r10 380 - PUSH r1 381 - call schedule 382 - POP r1 383 - mov r4, r0 384 - stw r4, TI_PREEMPT_COUNT(r1) 385 - rdctl r10, status /* disable intrs */ 386 - andi r10, r10, %lo(~STATUS_PIE) 387 - wrctl status, r10 388 - br need_resched 389 - #else 390 - br restore_all 373 + call preempt_schedule_irq 391 374 #endif 375 + br restore_all 392 376 393 377 /*********************************************************************** 394 378 * A few syscall wrappers
+10
arch/powerpc/include/asm/kexec.h
··· 86 86 extern void reserve_crashkernel(void); 87 87 extern void machine_kexec_mask_interrupts(void); 88 88 89 + static inline bool kdump_in_progress(void) 90 + { 91 + return crashing_cpu >= 0; 92 + } 93 + 89 94 #else /* !CONFIG_KEXEC */ 90 95 static inline void crash_kexec_secondary(struct pt_regs *regs) { } 91 96 ··· 109 104 static inline int crash_shutdown_unregister(crash_shutdown_t handler) 110 105 { 111 106 return 0; 107 + } 108 + 109 + static inline bool kdump_in_progress(void) 110 + { 111 + return false; 112 112 } 113 113 114 114 #endif /* CONFIG_KEXEC */
+1
arch/powerpc/include/asm/systbl.h
··· 366 366 SYSCALL_SPU(getrandom) 367 367 SYSCALL_SPU(memfd_create) 368 368 SYSCALL_SPU(bpf) 369 + COMPAT_SYS(execveat)
+7 -6
arch/powerpc/include/asm/thread_info.h
··· 23 23 #define THREAD_SIZE (1 << THREAD_SHIFT) 24 24 25 25 #ifdef CONFIG_PPC64 26 - #define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT 26 + #define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT) 27 27 #else 28 - #define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT 28 + #define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT) 29 29 #endif 30 30 31 31 #ifndef __ASSEMBLY__ ··· 71 71 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 72 72 73 73 /* how to get the thread information struct from C */ 74 - register unsigned long __current_r1 asm("r1"); 75 74 static inline struct thread_info *current_thread_info(void) 76 75 { 77 - /* gcc4, at least, is smart enough to turn this into a single 78 - * rlwinm for ppc32 and clrrdi for ppc64 */ 79 - return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1)); 76 + unsigned long val; 77 + 78 + asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val)); 79 + 80 + return (struct thread_info *)val; 80 81 } 81 82 82 83 #endif /* __ASSEMBLY__ */
+1 -1
arch/powerpc/include/asm/unistd.h
··· 12 12 #include <uapi/asm/unistd.h> 13 13 14 14 15 - #define __NR_syscalls 362 15 + #define __NR_syscalls 363 16 16 17 17 #define __NR__exit __NR_exit 18 18 #define NR_syscalls __NR_syscalls
+1
arch/powerpc/include/uapi/asm/unistd.h
··· 384 384 #define __NR_getrandom 359 385 385 #define __NR_memfd_create 360 386 386 #define __NR_bpf 361 387 + #define __NR_execveat 362 387 388 388 389 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
+1 -1
arch/powerpc/kernel/machine_kexec_64.c
··· 330 330 * using debugger IPI. 331 331 */ 332 332 333 - if (crashing_cpu == -1) 333 + if (!kdump_in_progress()) 334 334 kexec_prepare_cpus(); 335 335 336 336 pr_debug("kexec: Starting switchover sequence.\n");
+1 -8
arch/powerpc/kernel/smp.c
··· 700 700 smp_store_cpu_info(cpu); 701 701 set_dec(tb_ticks_per_jiffy); 702 702 preempt_disable(); 703 + cpu_callin_map[cpu] = 1; 703 704 704 705 if (smp_ops->setup_cpu) 705 706 smp_ops->setup_cpu(cpu); ··· 738 737 smp_wmb(); 739 738 notify_cpu_starting(cpu); 740 739 set_cpu_online(cpu, true); 741 - 742 - /* 743 - * CPU must be marked active and online before we signal back to the 744 - * master, because the scheduler needs to see the cpu_online and 745 - * cpu_active bits set. 746 - */ 747 - smp_wmb(); 748 - cpu_callin_map[cpu] = 1; 749 740 750 741 local_irq_enable(); 751 742
-1
arch/powerpc/platforms/powernv/opal-wrappers.S
··· 40 40 b 1f; \ 41 41 END_FTR_SECTION(0, 1); \ 42 42 ld r12,opal_tracepoint_refcount@toc(r2); \ 43 - std r12,32(r1); \ 44 43 cmpdi r12,0; \ 45 44 bne- LABEL; \ 46 45 1:
+7 -1
arch/powerpc/platforms/pseries/lpar.c
··· 43 43 #include <asm/trace.h> 44 44 #include <asm/firmware.h> 45 45 #include <asm/plpar_wrappers.h> 46 + #include <asm/kexec.h> 46 47 #include <asm/fadump.h> 47 48 48 49 #include "pseries.h" ··· 268 267 * out to the user, but at least this will stop us from 269 268 * continuing on further and creating an even more 270 269 * difficult to debug situation. 270 + * 271 + * There is a known problem when kdump'ing, if cpus are offline 272 + * the above call will fail. Rather than panicking again, keep 273 + * going and hope the kdump kernel is also little endian, which 274 + * it usually is. 271 275 */ 272 - if (rc) 276 + if (rc && !kdump_in_progress()) 273 277 panic("Could not enable big endian exceptions"); 274 278 } 275 279 #endif
+1 -1
arch/s390/hypfs/hypfs_vm.c
··· 231 231 struct dbfs_d2fc_hdr { 232 232 u64 len; /* Length of d2fc buffer without header */ 233 233 u16 version; /* Version of header */ 234 - char tod_ext[16]; /* TOD clock for d2fc */ 234 + char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */ 235 235 u64 count; /* Number of VM guests in d2fc buffer */ 236 236 char reserved[30]; 237 237 } __attribute__ ((packed));
+1 -1
arch/s390/include/asm/irqflags.h
··· 36 36 37 37 static inline notrace unsigned long arch_local_save_flags(void) 38 38 { 39 - return __arch_local_irq_stosm(0x00); 39 + return __arch_local_irq_stnsm(0xff); 40 40 } 41 41 42 42 static inline notrace unsigned long arch_local_irq_save(void)
+6 -4
arch/s390/include/asm/timex.h
··· 67 67 set_clock_comparator(S390_lowcore.clock_comparator); 68 68 } 69 69 70 - #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 70 + #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 71 + #define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */ 71 72 72 73 typedef unsigned long long cycles_t; 73 74 74 - static inline void get_tod_clock_ext(char clk[16]) 75 + static inline void get_tod_clock_ext(char *clk) 75 76 { 76 - typedef struct { char _[sizeof(clk)]; } addrtype; 77 + typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype; 77 78 78 79 asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc"); 79 80 } 80 81 81 82 static inline unsigned long long get_tod_clock(void) 82 83 { 83 - unsigned char clk[16]; 84 + unsigned char clk[STORE_CLOCK_EXT_SIZE]; 85 + 84 86 get_tod_clock_ext(clk); 85 87 return *((unsigned long long *)&clk[1]); 86 88 }
+2 -1
arch/s390/include/uapi/asm/unistd.h
··· 289 289 #define __NR_bpf 351 290 290 #define __NR_s390_pci_mmio_write 352 291 291 #define __NR_s390_pci_mmio_read 353 292 - #define NR_syscalls 354 292 + #define __NR_execveat 354 293 + #define NR_syscalls 355 293 294 294 295 /* 295 296 * There are some system calls that are not present on 64 bit, some
+1
arch/s390/kernel/syscalls.S
··· 362 362 SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) 363 363 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write) 364 364 SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read) 365 + SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
+60 -9
arch/s390/kernel/uprobes.c
··· 48 48 return false; 49 49 } 50 50 51 + static int check_per_event(unsigned short cause, unsigned long control, 52 + struct pt_regs *regs) 53 + { 54 + if (!(regs->psw.mask & PSW_MASK_PER)) 55 + return 0; 56 + /* user space single step */ 57 + if (control == 0) 58 + return 1; 59 + /* over indication for storage alteration */ 60 + if ((control & 0x20200000) && (cause & 0x2000)) 61 + return 1; 62 + if (cause & 0x8000) { 63 + /* all branches */ 64 + if ((control & 0x80800000) == 0x80000000) 65 + return 1; 66 + /* branch into selected range */ 67 + if (((control & 0x80800000) == 0x80800000) && 68 + regs->psw.addr >= current->thread.per_user.start && 69 + regs->psw.addr <= current->thread.per_user.end) 70 + return 1; 71 + } 72 + return 0; 73 + } 74 + 51 75 int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) 52 76 { 53 77 int fixup = probe_get_fixup_type(auprobe->insn); ··· 95 71 if (regs->psw.addr - utask->xol_vaddr == ilen) 96 72 regs->psw.addr = utask->vaddr + ilen; 97 73 } 98 - /* If per tracing was active generate trap */ 99 - if (regs->psw.mask & PSW_MASK_PER) 100 - do_per_trap(regs); 74 + if (check_per_event(current->thread.per_event.cause, 75 + current->thread.per_user.control, regs)) { 76 + /* fix per address */ 77 + current->thread.per_event.address = utask->vaddr; 78 + /* trigger per event */ 79 + set_pt_regs_flag(regs, PIF_PER_TRAP); 80 + } 101 81 return 0; 102 82 } 103 83 ··· 134 106 clear_thread_flag(TIF_UPROBE_SINGLESTEP); 135 107 regs->int_code = auprobe->saved_int_code; 136 108 regs->psw.addr = current->utask->vaddr; 109 + current->thread.per_event.address = current->utask->vaddr; 137 110 } 138 111 139 112 unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, ··· 175 146 __rc; \ 176 147 }) 177 148 178 - #define emu_store_ril(ptr, input) \ 149 + #define emu_store_ril(regs, ptr, input) \ 179 150 ({ \ 180 151 unsigned int mask = sizeof(*(ptr)) - 1; \ 152 + __typeof__(ptr) __ptr = (ptr); \ 181 153 int __rc = 0; \ 
182 154 \ 183 155 if (!test_facility(34)) \ 184 156 __rc = EMU_ILLEGAL_OP; \ 185 - else if ((u64 __force)ptr & mask) \ 157 + else if ((u64 __force)__ptr & mask) \ 186 158 __rc = EMU_SPECIFICATION; \ 187 - else if (put_user(*(input), ptr)) \ 159 + else if (put_user(*(input), __ptr)) \ 188 160 __rc = EMU_ADDRESSING; \ 161 + if (__rc == 0) \ 162 + sim_stor_event(regs, __ptr, mask + 1); \ 189 163 __rc; \ 190 164 }) 191 165 ··· 228 196 s32 s32[2]; 229 197 s16 s16[4]; 230 198 }; 199 + 200 + /* 201 + * If user per registers are setup to trace storage alterations and an 202 + * emulated store took place on a fitting address a user trap is generated. 203 + */ 204 + static void sim_stor_event(struct pt_regs *regs, void *addr, int len) 205 + { 206 + if (!(regs->psw.mask & PSW_MASK_PER)) 207 + return; 208 + if (!(current->thread.per_user.control & PER_EVENT_STORE)) 209 + return; 210 + if ((void *)current->thread.per_user.start > (addr + len)) 211 + return; 212 + if ((void *)current->thread.per_user.end < addr) 213 + return; 214 + current->thread.per_event.address = regs->psw.addr; 215 + current->thread.per_event.cause = PER_EVENT_STORE >> 16; 216 + set_pt_regs_flag(regs, PIF_PER_TRAP); 217 + } 231 218 232 219 /* 233 220 * pc relative instructions are emulated, since parameters may not be ··· 300 249 rc = emu_load_ril((u32 __user *)uptr, &rx->u64); 301 250 break; 302 251 case 0x07: /* sthrl */ 303 - rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]); 252 + rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]); 304 253 break; 305 254 case 0x0b: /* stgrl */ 306 - rc = emu_store_ril((u64 __user *)uptr, &rx->u64); 255 + rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64); 307 256 break; 308 257 case 0x0f: /* strl */ 309 - rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]); 258 + rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]); 310 259 break; 311 260 } 312 261 break;
-2
arch/s390/kernel/vtime.c
··· 128 128 struct thread_info *ti = task_thread_info(tsk); 129 129 u64 timer, system; 130 130 131 - WARN_ON_ONCE(!irqs_disabled()); 132 - 133 131 timer = S390_lowcore.last_update_timer; 134 132 S390_lowcore.last_update_timer = get_vtimer(); 135 133 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+3 -2
arch/s390/mm/pgtable.c
··· 322 322 static unsigned long __gmap_segment_gaddr(unsigned long *entry) 323 323 { 324 324 struct page *page; 325 - unsigned long offset; 325 + unsigned long offset, mask; 326 326 327 327 offset = (unsigned long) entry / sizeof(unsigned long); 328 328 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE; 329 - page = pmd_to_page((pmd_t *) entry); 329 + mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); 330 + page = virt_to_page((void *)((unsigned long) entry & mask)); 330 331 return page->index + offset; 331 332 } 332 333
+4 -4
arch/s390/net/bpf_jit_comp.c
··· 431 431 EMIT4_DISP(0x88500000, K); 432 432 break; 433 433 case BPF_ALU | BPF_NEG: /* A = -A */ 434 - /* lnr %r5,%r5 */ 435 - EMIT2(0x1155); 434 + /* lcr %r5,%r5 */ 435 + EMIT2(0x1355); 436 436 break; 437 437 case BPF_JMP | BPF_JA: /* ip += K */ 438 438 offset = addrs[i + K] + jit->start - jit->prg; ··· 502 502 xbranch: /* Emit compare if the branch targets are different */ 503 503 if (filter->jt != filter->jf) { 504 504 jit->seen |= SEEN_XREG; 505 - /* cr %r5,%r12 */ 506 - EMIT2(0x195c); 505 + /* clr %r5,%r12 */ 506 + EMIT2(0x155c); 507 507 } 508 508 goto branch; 509 509 case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
+1
arch/um/Kconfig.common
··· 3 3 default y 4 4 select HAVE_ARCH_AUDITSYSCALL 5 5 select HAVE_UID16 6 + select HAVE_FUTEX_CMPXCHG if FUTEX 6 7 select GENERIC_IRQ_SHOW 7 8 select GENERIC_CPU_DEVICES 8 9 select GENERIC_IO
+1
arch/x86/boot/Makefile
··· 51 51 $(obj)/cpustr.h: $(obj)/mkcpustr FORCE 52 52 $(call if_changed,cpustr) 53 53 endif 54 + clean-files += cpustr.h 54 55 55 56 # --------------------------------------------------------------------------- 56 57
+1 -1
arch/x86/crypto/Makefile
··· 26 26 27 27 obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o 28 28 obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o 29 - obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ 30 29 obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o 31 30 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o 32 31 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o ··· 45 46 ifeq ($(avx2_supported),yes) 46 47 obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o 47 48 obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o 49 + obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ 48 50 endif 49 51 50 52 aes-i586-y := aes-i586-asm_32.o aes_glue.o
+35 -11
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
··· 208 208 209 209 .if (klen == KEY_128) 210 210 .if (load_keys) 211 - vmovdqa 3*16(p_keys), xkeyA 211 + vmovdqa 3*16(p_keys), xkey4 212 212 .endif 213 213 .else 214 214 vmovdqa 3*16(p_keys), xkeyA ··· 224 224 add $(16*by), p_in 225 225 226 226 .if (klen == KEY_128) 227 - vmovdqa 4*16(p_keys), xkey4 227 + vmovdqa 4*16(p_keys), xkeyB 228 228 .else 229 229 .if (load_keys) 230 230 vmovdqa 4*16(p_keys), xkey4 ··· 234 234 .set i, 0 235 235 .rept by 236 236 club XDATA, i 237 - vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ 237 + /* key 3 */ 238 + .if (klen == KEY_128) 239 + vaesenc xkey4, var_xdata, var_xdata 240 + .else 241 + vaesenc xkeyA, var_xdata, var_xdata 242 + .endif 238 243 .set i, (i +1) 239 244 .endr 240 245 ··· 248 243 .set i, 0 249 244 .rept by 250 245 club XDATA, i 251 - vaesenc xkey4, var_xdata, var_xdata /* key 4 */ 246 + /* key 4 */ 247 + .if (klen == KEY_128) 248 + vaesenc xkeyB, var_xdata, var_xdata 249 + .else 250 + vaesenc xkey4, var_xdata, var_xdata 251 + .endif 252 252 .set i, (i +1) 253 253 .endr 254 254 255 255 .if (klen == KEY_128) 256 256 .if (load_keys) 257 - vmovdqa 6*16(p_keys), xkeyB 257 + vmovdqa 6*16(p_keys), xkey8 258 258 .endif 259 259 .else 260 260 vmovdqa 6*16(p_keys), xkeyB ··· 277 267 .set i, 0 278 268 .rept by 279 269 club XDATA, i 280 - vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ 270 + /* key 6 */ 271 + .if (klen == KEY_128) 272 + vaesenc xkey8, var_xdata, var_xdata 273 + .else 274 + vaesenc xkeyB, var_xdata, var_xdata 275 + .endif 281 276 .set i, (i +1) 282 277 .endr 283 278 284 279 .if (klen == KEY_128) 285 - vmovdqa 8*16(p_keys), xkey8 280 + vmovdqa 8*16(p_keys), xkeyB 286 281 .else 287 282 .if (load_keys) 288 283 vmovdqa 8*16(p_keys), xkey8 ··· 303 288 304 289 .if (klen == KEY_128) 305 290 .if (load_keys) 306 - vmovdqa 9*16(p_keys), xkeyA 291 + vmovdqa 9*16(p_keys), xkey12 307 292 .endif 308 293 .else 309 294 vmovdqa 9*16(p_keys), xkeyA ··· 312 297 .set i, 0 313 298 .rept by 314 299 club XDATA, i 315 - vaesenc xkey8, 
var_xdata, var_xdata /* key 8 */ 300 + /* key 8 */ 301 + .if (klen == KEY_128) 302 + vaesenc xkeyB, var_xdata, var_xdata 303 + .else 304 + vaesenc xkey8, var_xdata, var_xdata 305 + .endif 316 306 .set i, (i +1) 317 307 .endr 318 308 ··· 326 306 .set i, 0 327 307 .rept by 328 308 club XDATA, i 329 - vaesenc xkeyA, var_xdata, var_xdata /* key 9 */ 309 + /* key 9 */ 310 + .if (klen == KEY_128) 311 + vaesenc xkey12, var_xdata, var_xdata 312 + .else 313 + vaesenc xkeyA, var_xdata, var_xdata 314 + .endif 330 315 .set i, (i +1) 331 316 .endr 332 317 ··· 437 412 /* main body of aes ctr load */ 438 413 439 414 .macro do_aes_ctrmain key_len 440 - 441 415 cmp $16, num_bytes 442 416 jb .Ldo_return2\key_len 443 417
+4 -2
arch/x86/include/asm/vgtod.h
··· 80 80 81 81 /* 82 82 * Load per CPU data from GDT. LSL is faster than RDTSCP and 83 - * works on all CPUs. 83 + * works on all CPUs. This is volatile so that it orders 84 + * correctly wrt barrier() and to keep gcc from cleverly 85 + * hoisting it out of the calling function. 84 86 */ 85 - asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); 87 + asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); 86 88 87 89 return p; 88 90 }
+4 -5
arch/x86/kernel/acpi/boot.c
··· 750 750 } 751 751 752 752 /* wrapper to silence section mismatch warning */ 753 - int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) 753 + int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) 754 754 { 755 755 return _acpi_map_lsapic(handle, physid, pcpu); 756 756 } 757 - EXPORT_SYMBOL(acpi_map_lsapic); 757 + EXPORT_SYMBOL(acpi_map_cpu); 758 758 759 - int acpi_unmap_lsapic(int cpu) 759 + int acpi_unmap_cpu(int cpu) 760 760 { 761 761 #ifdef CONFIG_ACPI_NUMA 762 762 set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); ··· 768 768 769 769 return (0); 770 770 } 771 - 772 - EXPORT_SYMBOL(acpi_unmap_lsapic); 771 + EXPORT_SYMBOL(acpi_unmap_cpu); 773 772 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 774 773 775 774 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
+1
arch/x86/kernel/cpu/Makefile
··· 66 66 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE 67 67 $(call if_changed,mkcapflags) 68 68 endif 69 + clean-files += capflags.c
+1 -1
arch/x86/kernel/cpu/mkcapflags.sh
··· 28 28 # If the /* comment */ starts with a quote string, grab that. 29 29 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" 30 30 [ -z "$VALUE" ] && VALUE="\"$NAME\"" 31 - [ "$VALUE" == '""' ] && continue 31 + [ "$VALUE" = '""' ] && continue 32 32 33 33 # Name is uppercase, VALUE is all lowercase 34 34 VALUE="$(echo "$VALUE" | tr A-Z a-z)"
+2 -2
arch/x86/kernel/cpu/perf_event_intel_ds.c
··· 568 568 }; 569 569 570 570 struct event_constraint intel_slm_pebs_event_constraints[] = { 571 - /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ 572 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), 571 + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ 572 + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1), 573 573 /* Allow all events as PEBS with no flags */ 574 574 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), 575 575 EVENT_CONSTRAINT_END
+32 -12
arch/x86/kernel/cpu/perf_event_intel_rapl.c
··· 103 103 104 104 #define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ 105 105 106 + #define RAPL_EVENT_ATTR_STR(_name, v, str) \ 107 + static struct perf_pmu_events_attr event_attr_##v = { \ 108 + .attr = __ATTR(_name, 0444, rapl_sysfs_show, NULL), \ 109 + .id = 0, \ 110 + .event_str = str, \ 111 + }; 112 + 106 113 struct rapl_pmu { 107 114 spinlock_t lock; 108 115 int hw_unit; /* 1/2^hw_unit Joule */ ··· 386 379 .attrs = rapl_pmu_attrs, 387 380 }; 388 381 389 - EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); 390 - EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); 391 - EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); 392 - EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); 382 + static ssize_t rapl_sysfs_show(struct device *dev, 383 + struct device_attribute *attr, 384 + char *page) 385 + { 386 + struct perf_pmu_events_attr *pmu_attr = \ 387 + container_of(attr, struct perf_pmu_events_attr, attr); 393 388 394 - EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); 395 - EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); 396 - EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); 397 - EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); 389 + if (pmu_attr->event_str) 390 + return sprintf(page, "%s", pmu_attr->event_str); 391 + 392 + return 0; 393 + } 394 + 395 + RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); 396 + RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); 397 + RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); 398 + RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); 399 + 400 + RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); 401 + RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); 402 + RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); 403 + RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); 398 404 399 405 /* 400 406 * we compute in 0.23 nJ increments regardless of MSR 401 407 */ 402 - 
EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); 403 - EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); 404 - EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); 405 - EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); 408 + RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); 409 + RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); 410 + RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); 411 + RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); 406 412 407 413 static struct attribute *rapl_events_srv_attr[] = { 408 414 EVENT_PTR(rapl_cores),
+1 -1
arch/x86/kernel/cpu/perf_event_intel_uncore.h
··· 17 17 #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) 18 18 #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) 19 19 #define UNCORE_EXTRA_PCI_DEV 0xff 20 - #define UNCORE_EXTRA_PCI_DEV_MAX 2 20 + #define UNCORE_EXTRA_PCI_DEV_MAX 3 21 21 22 22 /* support up to 8 sockets */ 23 23 #define UNCORE_SOCKET_MAX 8
+17
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
··· 891 891 enum { 892 892 SNBEP_PCI_QPI_PORT0_FILTER, 893 893 SNBEP_PCI_QPI_PORT1_FILTER, 894 + HSWEP_PCI_PCU_3, 894 895 }; 895 896 896 897 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) ··· 2027 2026 { 2028 2027 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 2029 2028 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 2029 + 2030 + /* Detect 6-8 core systems with only two SBOXes */ 2031 + if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) { 2032 + u32 capid4; 2033 + 2034 + pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3], 2035 + 0x94, &capid4); 2036 + if (((capid4 >> 6) & 0x3) == 0) 2037 + hswep_uncore_sbox.num_boxes = 2; 2038 + } 2039 + 2030 2040 uncore_msr_uncores = hswep_msr_uncores; 2031 2041 } 2032 2042 ··· 2298 2286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96), 2299 2287 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2300 2288 SNBEP_PCI_QPI_PORT1_FILTER), 2289 + }, 2290 + { /* PCU.3 (for Capability registers) */ 2291 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0), 2292 + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2293 + HSWEP_PCI_PCU_3), 2301 2294 }, 2302 2295 { /* end: all zeroes */ } 2303 2296 };
+15 -5
arch/x86/kernel/kprobes/core.c
··· 1020 1020 regs->flags &= ~X86_EFLAGS_IF; 1021 1021 trace_hardirqs_off(); 1022 1022 regs->ip = (unsigned long)(jp->entry); 1023 + 1024 + /* 1025 + * jprobes use jprobe_return() which skips the normal return 1026 + * path of the function, and this messes up the accounting of the 1027 + * function graph tracer to get messed up. 1028 + * 1029 + * Pause function graph tracing while performing the jprobe function. 1030 + */ 1031 + pause_graph_tracing(); 1023 1032 return 1; 1024 1033 } 1025 1034 NOKPROBE_SYMBOL(setjmp_pre_handler); ··· 1057 1048 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 1058 1049 u8 *addr = (u8 *) (regs->ip - 1); 1059 1050 struct jprobe *jp = container_of(p, struct jprobe, kp); 1051 + void *saved_sp = kcb->jprobe_saved_sp; 1060 1052 1061 1053 if ((addr > (u8 *) jprobe_return) && 1062 1054 (addr < (u8 *) jprobe_return_end)) { 1063 - if (stack_addr(regs) != kcb->jprobe_saved_sp) { 1055 + if (stack_addr(regs) != saved_sp) { 1064 1056 struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; 1065 1057 printk(KERN_ERR 1066 1058 "current sp %p does not match saved sp %p\n", 1067 - stack_addr(regs), kcb->jprobe_saved_sp); 1059 + stack_addr(regs), saved_sp); 1068 1060 printk(KERN_ERR "Saved registers for jprobe %p\n", jp); 1069 1061 show_regs(saved_regs); 1070 1062 printk(KERN_ERR "Current registers\n"); 1071 1063 show_regs(regs); 1072 1064 BUG(); 1073 1065 } 1066 + /* It's OK to start function graph tracing again */ 1067 + unpause_graph_tracing(); 1074 1068 *regs = kcb->jprobe_saved_regs; 1075 - memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), 1076 - kcb->jprobes_stack, 1077 - MIN_STACK_SIZE(kcb->jprobe_saved_sp)); 1069 + memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); 1078 1070 preempt_enable_no_resched(); 1079 1071 return 1; 1080 1072 }
+90
arch/x86/kernel/perf_regs.c
··· 78 78 { 79 79 return PERF_SAMPLE_REGS_ABI_32; 80 80 } 81 + 82 + void perf_get_regs_user(struct perf_regs *regs_user, 83 + struct pt_regs *regs, 84 + struct pt_regs *regs_user_copy) 85 + { 86 + regs_user->regs = task_pt_regs(current); 87 + regs_user->abi = perf_reg_abi(current); 88 + } 81 89 #else /* CONFIG_X86_64 */ 82 90 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ 83 91 (1ULL << PERF_REG_X86_ES) | \ ··· 109 101 return PERF_SAMPLE_REGS_ABI_32; 110 102 else 111 103 return PERF_SAMPLE_REGS_ABI_64; 104 + } 105 + 106 + void perf_get_regs_user(struct perf_regs *regs_user, 107 + struct pt_regs *regs, 108 + struct pt_regs *regs_user_copy) 109 + { 110 + struct pt_regs *user_regs = task_pt_regs(current); 111 + 112 + /* 113 + * If we're in an NMI that interrupted task_pt_regs setup, then 114 + * we can't sample user regs at all. This check isn't really 115 + * sufficient, though, as we could be in an NMI inside an interrupt 116 + * that happened during task_pt_regs setup. 117 + */ 118 + if (regs->sp > (unsigned long)&user_regs->r11 && 119 + regs->sp <= (unsigned long)(user_regs + 1)) { 120 + regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 121 + regs_user->regs = NULL; 122 + return; 123 + } 124 + 125 + /* 126 + * RIP, flags, and the argument registers are usually saved. 127 + * orig_ax is probably okay, too. 128 + */ 129 + regs_user_copy->ip = user_regs->ip; 130 + regs_user_copy->cx = user_regs->cx; 131 + regs_user_copy->dx = user_regs->dx; 132 + regs_user_copy->si = user_regs->si; 133 + regs_user_copy->di = user_regs->di; 134 + regs_user_copy->r8 = user_regs->r8; 135 + regs_user_copy->r9 = user_regs->r9; 136 + regs_user_copy->r10 = user_regs->r10; 137 + regs_user_copy->r11 = user_regs->r11; 138 + regs_user_copy->orig_ax = user_regs->orig_ax; 139 + regs_user_copy->flags = user_regs->flags; 140 + 141 + /* 142 + * Don't even try to report the "rest" regs. 
143 + */ 144 + regs_user_copy->bx = -1; 145 + regs_user_copy->bp = -1; 146 + regs_user_copy->r12 = -1; 147 + regs_user_copy->r13 = -1; 148 + regs_user_copy->r14 = -1; 149 + regs_user_copy->r15 = -1; 150 + 151 + /* 152 + * For this to be at all useful, we need a reasonable guess for 153 + * sp and the ABI. Be careful: we're in NMI context, and we're 154 + * considering current to be the current task, so we should 155 + * be careful not to look at any other percpu variables that might 156 + * change during context switches. 157 + */ 158 + if (IS_ENABLED(CONFIG_IA32_EMULATION) && 159 + task_thread_info(current)->status & TS_COMPAT) { 160 + /* Easy case: we're in a compat syscall. */ 161 + regs_user->abi = PERF_SAMPLE_REGS_ABI_32; 162 + regs_user_copy->sp = user_regs->sp; 163 + regs_user_copy->cs = user_regs->cs; 164 + regs_user_copy->ss = user_regs->ss; 165 + } else if (user_regs->orig_ax != -1) { 166 + /* 167 + * We're probably in a 64-bit syscall. 168 + * Warning: this code is severely racy. At least it's better 169 + * than just blindly copying user_regs. 170 + */ 171 + regs_user->abi = PERF_SAMPLE_REGS_ABI_64; 172 + regs_user_copy->sp = this_cpu_read(old_rsp); 173 + regs_user_copy->cs = __USER_CS; 174 + regs_user_copy->ss = __USER_DS; 175 + regs_user_copy->cx = -1; /* usually contains garbage */ 176 + } else { 177 + /* We're probably in an interrupt or exception. */ 178 + regs_user->abi = user_64bit_mode(user_regs) ? 179 + PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32; 180 + regs_user_copy->sp = user_regs->sp; 181 + regs_user_copy->cs = user_regs->cs; 182 + regs_user_copy->ss = user_regs->ss; 183 + } 184 + 185 + regs_user->regs = regs_user_copy; 112 186 } 113 187 #endif /* CONFIG_X86_32 */
+1 -1
arch/x86/lib/insn.c
··· 28 28 29 29 /* Verify next sizeof(t) bytes can be on the same instruction */ 30 30 #define validate_next(t, insn, n) \ 31 - ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr) 31 + ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) 32 32 33 33 #define __get_next(t, insn) \ 34 34 ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+17 -20
arch/x86/mm/init.c
··· 438 438 static unsigned long __init get_new_step_size(unsigned long step_size) 439 439 { 440 440 /* 441 - * Explain why we shift by 5 and why we don't have to worry about 442 - * 'step_size << 5' overflowing: 443 - * 444 - * initial mapped size is PMD_SIZE (2M). 441 + * Initial mapped size is PMD_SIZE (2M). 445 442 * We can not set step_size to be PUD_SIZE (1G) yet. 446 443 * In worse case, when we cross the 1G boundary, and 447 444 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) 448 - * to map 1G range with PTE. Use 5 as shift for now. 445 + * to map 1G range with PTE. Hence we use one less than the 446 + * difference of page table level shifts. 449 447 * 450 - * Don't need to worry about overflow, on 32bit, when step_size 451 - * is 0, round_down() returns 0 for start, and that turns it 452 - * into 0x100000000ULL. 448 + * Don't need to worry about overflow in the top-down case, on 32bit, 449 + * when step_size is 0, round_down() returns 0 for start, and that 450 + * turns it into 0x100000000ULL. 451 + * In the bottom-up case, round_up(x, 0) returns 0 though too, which 452 + * needs to be taken into consideration by the code below. 
453 453 */ 454 - return step_size << 5; 454 + return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); 455 455 } 456 456 457 457 /** ··· 471 471 unsigned long step_size; 472 472 unsigned long addr; 473 473 unsigned long mapped_ram_size = 0; 474 - unsigned long new_mapped_ram_size; 475 474 476 475 /* xen has big range in reserved near end of ram, skip it at first.*/ 477 476 addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); ··· 495 496 start = map_start; 496 497 } else 497 498 start = map_start; 498 - new_mapped_ram_size = init_range_memory_mapping(start, 499 + mapped_ram_size += init_range_memory_mapping(start, 499 500 last_start); 500 501 last_start = start; 501 502 min_pfn_mapped = last_start >> PAGE_SHIFT; 502 - /* only increase step_size after big range get mapped */ 503 - if (new_mapped_ram_size > mapped_ram_size) 503 + if (mapped_ram_size >= step_size) 504 504 step_size = get_new_step_size(step_size); 505 - mapped_ram_size += new_mapped_ram_size; 506 505 } 507 506 508 507 if (real_end < map_end) ··· 521 524 static void __init memory_map_bottom_up(unsigned long map_start, 522 525 unsigned long map_end) 523 526 { 524 - unsigned long next, new_mapped_ram_size, start; 527 + unsigned long next, start; 525 528 unsigned long mapped_ram_size = 0; 526 529 /* step_size need to be small so pgt_buf from BRK could cover it */ 527 530 unsigned long step_size = PMD_SIZE; ··· 536 539 * for page table. 
537 540 */ 538 541 while (start < map_end) { 539 - if (map_end - start > step_size) { 542 + if (step_size && map_end - start > step_size) { 540 543 next = round_up(start + 1, step_size); 541 544 if (next > map_end) 542 545 next = map_end; 543 - } else 546 + } else { 544 547 next = map_end; 548 + } 545 549 546 - new_mapped_ram_size = init_range_memory_mapping(start, next); 550 + mapped_ram_size += init_range_memory_mapping(start, next); 547 551 start = next; 548 552 549 - if (new_mapped_ram_size > mapped_ram_size) 553 + if (mapped_ram_size >= step_size) 550 554 step_size = get_new_step_size(step_size); 551 - mapped_ram_size += new_mapped_ram_size; 552 555 } 553 556 } 554 557
+1 -1
arch/x86/um/sys_call_table_32.c
··· 34 34 35 35 extern asmlinkage void sys_ni_syscall(void); 36 36 37 - const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { 37 + const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { 38 38 /* 39 39 * Smells like a compiler bug -- it doesn't work 40 40 * when the & below is removed.
+1 -1
arch/x86/um/sys_call_table_64.c
··· 47 47 48 48 extern void sys_ni_syscall(void); 49 49 50 - const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { 50 + const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = { 51 51 /* 52 52 * Smells like a compiler bug -- it doesn't work 53 53 * when the & below is removed.
+29 -16
arch/x86/vdso/vma.c
··· 41 41 42 42 struct linux_binprm; 43 43 44 - /* Put the vdso above the (randomized) stack with another randomized offset. 45 - This way there is no hole in the middle of address space. 46 - To save memory make sure it is still in the same PTE as the stack top. 47 - This doesn't give that many random bits. 48 - 49 - Only used for the 64-bit and x32 vdsos. */ 44 + /* 45 + * Put the vdso above the (randomized) stack with another randomized 46 + * offset. This way there is no hole in the middle of address space. 47 + * To save memory make sure it is still in the same PTE as the stack 48 + * top. This doesn't give that many random bits. 49 + * 50 + * Note that this algorithm is imperfect: the distribution of the vdso 51 + * start address within a PMD is biased toward the end. 52 + * 53 + * Only used for the 64-bit and x32 vdsos. 54 + */ 50 55 static unsigned long vdso_addr(unsigned long start, unsigned len) 51 56 { 52 57 #ifdef CONFIG_X86_32 ··· 59 54 #else 60 55 unsigned long addr, end; 61 56 unsigned offset; 62 - end = (start + PMD_SIZE - 1) & PMD_MASK; 57 + 58 + /* 59 + * Round up the start address. It can start out unaligned as a result 60 + * of stack start randomization. 61 + */ 62 + start = PAGE_ALIGN(start); 63 + 64 + /* Round the lowest possible end address up to a PMD boundary. */ 65 + end = (start + len + PMD_SIZE - 1) & PMD_MASK; 63 66 if (end >= TASK_SIZE_MAX) 64 67 end = TASK_SIZE_MAX; 65 68 end -= len; 66 - /* This loses some more bits than a modulo, but is cheaper */ 67 - offset = get_random_int() & (PTRS_PER_PTE - 1); 68 - addr = start + (offset << PAGE_SHIFT); 69 - if (addr >= end) 70 - addr = end; 69 + 70 + if (end > start) { 71 + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); 72 + addr = start + (offset << PAGE_SHIFT); 73 + } else { 74 + addr = start; 75 + } 71 76 72 77 /* 73 - * page-align it here so that get_unmapped_area doesn't 74 - * align it wrongfully again to the next page. 
addr can come in 4K 75 - * unaligned here as a result of stack start randomization. 78 + * Forcibly align the final address in case we have a hardware 79 + * issue that requires alignment for performance reasons. 76 80 */ 77 - addr = PAGE_ALIGN(addr); 78 81 addr = align_vdso_addr(addr); 79 82 80 83 return addr;
+21 -1
arch/x86/xen/enlighten.c
··· 40 40 #include <xen/interface/physdev.h> 41 41 #include <xen/interface/vcpu.h> 42 42 #include <xen/interface/memory.h> 43 + #include <xen/interface/nmi.h> 43 44 #include <xen/interface/xen-mca.h> 44 45 #include <xen/features.h> 45 46 #include <xen/page.h> ··· 67 66 #include <asm/reboot.h> 68 67 #include <asm/stackprotector.h> 69 68 #include <asm/hypervisor.h> 69 + #include <asm/mach_traps.h> 70 70 #include <asm/mwait.h> 71 71 #include <asm/pci_x86.h> 72 72 #include <asm/pat.h> ··· 1353 1351 .emergency_restart = xen_emergency_restart, 1354 1352 }; 1355 1353 1354 + static unsigned char xen_get_nmi_reason(void) 1355 + { 1356 + unsigned char reason = 0; 1357 + 1358 + /* Construct a value which looks like it came from port 0x61. */ 1359 + if (test_bit(_XEN_NMIREASON_io_error, 1360 + &HYPERVISOR_shared_info->arch.nmi_reason)) 1361 + reason |= NMI_REASON_IOCHK; 1362 + if (test_bit(_XEN_NMIREASON_pci_serr, 1363 + &HYPERVISOR_shared_info->arch.nmi_reason)) 1364 + reason |= NMI_REASON_SERR; 1365 + 1366 + return reason; 1367 + } 1368 + 1356 1369 static void __init xen_boot_params_init_edd(void) 1357 1370 { 1358 1371 #if IS_ENABLED(CONFIG_EDD) ··· 1552 1535 pv_info = xen_info; 1553 1536 pv_init_ops = xen_init_ops; 1554 1537 pv_apic_ops = xen_apic_ops; 1555 - if (!xen_pvh_domain()) 1538 + if (!xen_pvh_domain()) { 1556 1539 pv_cpu_ops = xen_cpu_ops; 1540 + 1541 + x86_platform.get_nmi_reason = xen_get_nmi_reason; 1542 + } 1557 1543 1558 1544 if (xen_feature(XENFEAT_auto_translated_physmap)) 1559 1545 x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
+10 -10
arch/x86/xen/p2m.c
··· 167 167 return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); 168 168 } 169 169 170 - /* Only to be called in case of a race for a page just allocated! */ 171 - static void free_p2m_page(void *p) 170 + static void __ref free_p2m_page(void *p) 172 171 { 173 - BUG_ON(!slab_is_available()); 172 + if (unlikely(!slab_is_available())) { 173 + free_bootmem((unsigned long)p, PAGE_SIZE); 174 + return; 175 + } 176 + 174 177 free_page((unsigned long)p); 175 178 } 176 179 ··· 378 375 p2m_missing_pte : p2m_identity_pte; 379 376 for (i = 0; i < PMDS_PER_MID_PAGE; i++) { 380 377 pmdp = populate_extra_pmd( 381 - (unsigned long)(p2m + pfn + i * PTRS_PER_PTE)); 378 + (unsigned long)(p2m + pfn) + i * PMD_SIZE); 382 379 set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE)); 383 380 } 384 381 } ··· 439 436 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual 440 437 * pmd. In case of PAE/x86-32 there are multiple pmds to allocate! 441 438 */ 442 - static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) 439 + static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg) 443 440 { 444 441 pte_t *ptechk; 445 - pte_t *pteret = ptep; 446 442 pte_t *pte_newpg[PMDS_PER_MID_PAGE]; 447 443 pmd_t *pmdp; 448 444 unsigned int level; ··· 475 473 if (ptechk == pte_pg) { 476 474 set_pmd(pmdp, 477 475 __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE)); 478 - if (vaddr == (addr & ~(PMD_SIZE - 1))) 479 - pteret = pte_offset_kernel(pmdp, addr); 480 476 pte_newpg[i] = NULL; 481 477 } 482 478 ··· 488 488 vaddr += PMD_SIZE; 489 489 } 490 490 491 - return pteret; 491 + return lookup_address(addr, &level); 492 492 } 493 493 494 494 /* ··· 517 517 518 518 if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) { 519 519 /* PMD level is missing, allocate a new one */ 520 - ptep = alloc_p2m_pmd(addr, ptep, pte_pg); 520 + ptep = alloc_p2m_pmd(addr, pte_pg); 521 521 if (!ptep) 522 522 return false; 523 523 }
+20 -22
arch/x86/xen/setup.c
··· 140 140 unsigned long __ref xen_chk_extra_mem(unsigned long pfn) 141 141 { 142 142 int i; 143 - unsigned long addr = PFN_PHYS(pfn); 143 + phys_addr_t addr = PFN_PHYS(pfn); 144 144 145 145 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 146 146 if (addr >= xen_extra_mem[i].start && ··· 160 160 int i; 161 161 162 162 for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { 163 + if (!xen_extra_mem[i].size) 164 + continue; 163 165 pfn_s = PFN_DOWN(xen_extra_mem[i].start); 164 166 pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size); 165 167 for (pfn = pfn_s; pfn < pfn_e; pfn++) ··· 231 229 * as a fallback if the remapping fails. 232 230 */ 233 231 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, 234 - unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity, 235 - unsigned long *released) 232 + unsigned long end_pfn, unsigned long nr_pages, unsigned long *released) 236 233 { 237 - unsigned long len = 0; 238 234 unsigned long pfn, end; 239 235 int ret; 240 236 241 237 WARN_ON(start_pfn > end_pfn); 242 238 239 + /* Release pages first. */ 243 240 end = min(end_pfn, nr_pages); 244 241 for (pfn = start_pfn; pfn < end; pfn++) { 245 242 unsigned long mfn = pfn_to_mfn(pfn); ··· 251 250 WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); 252 251 253 252 if (ret == 1) { 253 + (*released)++; 254 254 if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY)) 255 255 break; 256 - len++; 257 256 } else 258 257 break; 259 258 } 260 259 261 - /* Need to release pages first */ 262 - *released += len; 263 - *identity += set_phys_range_identity(start_pfn, end_pfn); 260 + set_phys_range_identity(start_pfn, end_pfn); 264 261 } 265 262 266 263 /* ··· 286 287 } 287 288 288 289 /* Update kernel mapping, but not for highmem. 
*/ 289 - if ((pfn << PAGE_SHIFT) >= __pa(high_memory)) 290 + if (pfn >= PFN_UP(__pa(high_memory - 1))) 290 291 return; 291 292 292 293 if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), ··· 317 318 unsigned long ident_pfn_iter, remap_pfn_iter; 318 319 unsigned long ident_end_pfn = start_pfn + size; 319 320 unsigned long left = size; 320 - unsigned long ident_cnt = 0; 321 321 unsigned int i, chunk; 322 322 323 323 WARN_ON(size == 0); ··· 345 347 xen_remap_mfn = mfn; 346 348 347 349 /* Set identity map */ 348 - ident_cnt += set_phys_range_identity(ident_pfn_iter, 349 - ident_pfn_iter + chunk); 350 + set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk); 350 351 351 352 left -= chunk; 352 353 } ··· 368 371 static unsigned long __init xen_set_identity_and_remap_chunk( 369 372 const struct e820entry *list, size_t map_size, unsigned long start_pfn, 370 373 unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn, 371 - unsigned long *identity, unsigned long *released) 374 + unsigned long *released, unsigned long *remapped) 372 375 { 373 376 unsigned long pfn; 374 377 unsigned long i = 0; ··· 383 386 /* Do not remap pages beyond the current allocation */ 384 387 if (cur_pfn >= nr_pages) { 385 388 /* Identity map remaining pages */ 386 - *identity += set_phys_range_identity(cur_pfn, 387 - cur_pfn + size); 389 + set_phys_range_identity(cur_pfn, cur_pfn + size); 388 390 break; 389 391 } 390 392 if (cur_pfn + size > nr_pages) ··· 394 398 if (!remap_range_size) { 395 399 pr_warning("Unable to find available pfn range, not remapping identity pages\n"); 396 400 xen_set_identity_and_release_chunk(cur_pfn, 397 - cur_pfn + left, nr_pages, identity, released); 401 + cur_pfn + left, nr_pages, released); 398 402 break; 399 403 } 400 404 /* Adjust size to fit in current e820 RAM region */ ··· 406 410 /* Update variables to reflect new mappings. 
*/ 407 411 i += size; 408 412 remap_pfn += size; 409 - *identity += size; 413 + *remapped += size; 410 414 } 411 415 412 416 /* ··· 423 427 424 428 static void __init xen_set_identity_and_remap( 425 429 const struct e820entry *list, size_t map_size, unsigned long nr_pages, 426 - unsigned long *released) 430 + unsigned long *released, unsigned long *remapped) 427 431 { 428 432 phys_addr_t start = 0; 429 - unsigned long identity = 0; 430 433 unsigned long last_pfn = nr_pages; 431 434 const struct e820entry *entry; 432 435 unsigned long num_released = 0; 436 + unsigned long num_remapped = 0; 433 437 int i; 434 438 435 439 /* ··· 456 460 last_pfn = xen_set_identity_and_remap_chunk( 457 461 list, map_size, start_pfn, 458 462 end_pfn, nr_pages, last_pfn, 459 - &identity, &num_released); 463 + &num_released, &num_remapped); 460 464 start = end; 461 465 } 462 466 } 463 467 464 468 *released = num_released; 469 + *remapped = num_remapped; 465 470 466 - pr_info("Set %ld page(s) to 1-1 mapping\n", identity); 467 471 pr_info("Released %ld page(s)\n", num_released); 468 472 } 469 473 ··· 582 586 struct xen_memory_map memmap; 583 587 unsigned long max_pages; 584 588 unsigned long extra_pages = 0; 589 + unsigned long remapped_pages; 585 590 int i; 586 591 int op; 587 592 ··· 632 635 * underlying RAM. 633 636 */ 634 637 xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn, 635 - &xen_released_pages); 638 + &xen_released_pages, &remapped_pages); 636 639 637 640 extra_pages += xen_released_pages; 641 + extra_pages += remapped_pages; 638 642 639 643 /* 640 644 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+5 -13
arch/x86/xen/time.c
··· 391 391 392 392 struct xen_clock_event_device { 393 393 struct clock_event_device evt; 394 - char *name; 394 + char name[16]; 395 395 }; 396 396 static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 }; 397 397 ··· 420 420 if (evt->irq >= 0) { 421 421 unbind_from_irqhandler(evt->irq, NULL); 422 422 evt->irq = -1; 423 - kfree(per_cpu(xen_clock_events, cpu).name); 424 - per_cpu(xen_clock_events, cpu).name = NULL; 425 423 } 426 424 } 427 425 428 426 void xen_setup_timer(int cpu) 429 427 { 430 - char *name; 431 - struct clock_event_device *evt; 428 + struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); 429 + struct clock_event_device *evt = &xevt->evt; 432 430 int irq; 433 431 434 - evt = &per_cpu(xen_clock_events, cpu).evt; 435 432 WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); 436 433 if (evt->irq >= 0) 437 434 xen_teardown_timer(cpu); 438 435 439 436 printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); 440 437 441 - name = kasprintf(GFP_KERNEL, "timer%d", cpu); 442 - if (!name) 443 - name = "<timer kasprintf failed>"; 438 + snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu); 444 439 445 440 irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, 446 441 IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| 447 442 IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, 448 - name, NULL); 443 + xevt->name, NULL); 449 444 (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); 450 445 451 446 memcpy(evt, xen_clockevent, sizeof(*evt)); 452 447 453 448 evt->cpumask = cpumask_of(cpu); 454 449 evt->irq = irq; 455 - per_cpu(xen_clock_events, cpu).name = name; 456 450 } 457 451 458 452 459 453 void xen_setup_cpu_clockevents(void) 460 454 { 461 - BUG_ON(preemptible()); 462 - 463 455 clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); 464 456 } 465 457
+20 -1
block/blk-core.c
··· 473 473 } 474 474 EXPORT_SYMBOL_GPL(blk_queue_bypass_end); 475 475 476 + void blk_set_queue_dying(struct request_queue *q) 477 + { 478 + queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); 479 + 480 + if (q->mq_ops) 481 + blk_mq_wake_waiters(q); 482 + else { 483 + struct request_list *rl; 484 + 485 + blk_queue_for_each_rl(rl, q) { 486 + if (rl->rq_pool) { 487 + wake_up(&rl->wait[BLK_RW_SYNC]); 488 + wake_up(&rl->wait[BLK_RW_ASYNC]); 489 + } 490 + } 491 + } 492 + } 493 + EXPORT_SYMBOL_GPL(blk_set_queue_dying); 494 + 476 495 /** 477 496 * blk_cleanup_queue - shutdown a request queue 478 497 * @q: request queue to shutdown ··· 505 486 506 487 /* mark @q DYING, no new request or merges will be allowed afterwards */ 507 488 mutex_lock(&q->sysfs_lock); 508 - queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); 489 + blk_set_queue_dying(q); 509 490 spin_lock_irq(lock); 510 491 511 492 /*
+10 -4
block/blk-mq-tag.c
··· 68 68 } 69 69 70 70 /* 71 - * Wakeup all potentially sleeping on normal (non-reserved) tags 71 + * Wakeup all potentially sleeping on tags 72 72 */ 73 - static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) 73 + void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) 74 74 { 75 75 struct blk_mq_bitmap_tags *bt; 76 76 int i, wake_index; ··· 84 84 wake_up(&bs->wait); 85 85 86 86 wake_index = bt_index_inc(wake_index); 87 + } 88 + 89 + if (include_reserve) { 90 + bt = &tags->breserved_tags; 91 + if (waitqueue_active(&bt->bs[0].wait)) 92 + wake_up(&bt->bs[0].wait); 87 93 } 88 94 } 89 95 ··· 106 100 107 101 atomic_dec(&tags->active_queues); 108 102 109 - blk_mq_tag_wakeup_all(tags); 103 + blk_mq_tag_wakeup_all(tags, false); 110 104 } 111 105 112 106 /* ··· 590 584 * static and should never need resizing. 591 585 */ 592 586 bt_update_count(&tags->bitmap_tags, tdepth); 593 - blk_mq_tag_wakeup_all(tags); 587 + blk_mq_tag_wakeup_all(tags, false); 594 588 return 0; 595 589 } 596 590
+1
block/blk-mq-tag.h
··· 54 54 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); 55 55 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); 56 56 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); 57 + extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); 57 58 58 59 enum { 59 60 BLK_MQ_TAG_CACHE_MIN = 1,
+69 -6
block/blk-mq.c
··· 107 107 wake_up_all(&q->mq_freeze_wq); 108 108 } 109 109 110 - static void blk_mq_freeze_queue_start(struct request_queue *q) 110 + void blk_mq_freeze_queue_start(struct request_queue *q) 111 111 { 112 112 bool freeze; 113 113 ··· 120 120 blk_mq_run_queues(q, false); 121 121 } 122 122 } 123 + EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); 123 124 124 125 static void blk_mq_freeze_queue_wait(struct request_queue *q) 125 126 { ··· 137 136 blk_mq_freeze_queue_wait(q); 138 137 } 139 138 140 - static void blk_mq_unfreeze_queue(struct request_queue *q) 139 + void blk_mq_unfreeze_queue(struct request_queue *q) 141 140 { 142 141 bool wake; 143 142 ··· 149 148 percpu_ref_reinit(&q->mq_usage_counter); 150 149 wake_up_all(&q->mq_freeze_wq); 151 150 } 151 + } 152 + EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); 153 + 154 + void blk_mq_wake_waiters(struct request_queue *q) 155 + { 156 + struct blk_mq_hw_ctx *hctx; 157 + unsigned int i; 158 + 159 + queue_for_each_hw_ctx(q, hctx, i) 160 + if (blk_mq_hw_queue_mapped(hctx)) 161 + blk_mq_tag_wakeup_all(hctx->tags, true); 162 + 163 + /* 164 + * If we are called because the queue has now been marked as 165 + * dying, we need to ensure that processes currently waiting on 166 + * the queue are notified as well. 
167 + */ 168 + wake_up_all(&q->mq_freeze_wq); 152 169 } 153 170 154 171 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) ··· 277 258 ctx = alloc_data.ctx; 278 259 } 279 260 blk_mq_put_ctx(ctx); 280 - if (!rq) 261 + if (!rq) { 262 + blk_mq_queue_exit(q); 281 263 return ERR_PTR(-EWOULDBLOCK); 264 + } 282 265 return rq; 283 266 } 284 267 EXPORT_SYMBOL(blk_mq_alloc_request); ··· 404 383 } 405 384 EXPORT_SYMBOL(blk_mq_complete_request); 406 385 386 + int blk_mq_request_started(struct request *rq) 387 + { 388 + return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 389 + } 390 + EXPORT_SYMBOL_GPL(blk_mq_request_started); 391 + 407 392 void blk_mq_start_request(struct request *rq) 408 393 { 409 394 struct request_queue *q = rq->q; ··· 527 500 } 528 501 EXPORT_SYMBOL(blk_mq_add_to_requeue_list); 529 502 503 + void blk_mq_cancel_requeue_work(struct request_queue *q) 504 + { 505 + cancel_work_sync(&q->requeue_work); 506 + } 507 + EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); 508 + 530 509 void blk_mq_kick_requeue_list(struct request_queue *q) 531 510 { 532 511 kblockd_schedule_work(&q->requeue_work); 533 512 } 534 513 EXPORT_SYMBOL(blk_mq_kick_requeue_list); 514 + 515 + void blk_mq_abort_requeue_list(struct request_queue *q) 516 + { 517 + unsigned long flags; 518 + LIST_HEAD(rq_list); 519 + 520 + spin_lock_irqsave(&q->requeue_lock, flags); 521 + list_splice_init(&q->requeue_list, &rq_list); 522 + spin_unlock_irqrestore(&q->requeue_lock, flags); 523 + 524 + while (!list_empty(&rq_list)) { 525 + struct request *rq; 526 + 527 + rq = list_first_entry(&rq_list, struct request, queuelist); 528 + list_del_init(&rq->queuelist); 529 + rq->errors = -EIO; 530 + blk_mq_end_request(rq, rq->errors); 531 + } 532 + } 533 + EXPORT_SYMBOL(blk_mq_abort_requeue_list); 535 534 536 535 static inline bool is_flush_request(struct request *rq, 537 536 struct blk_flush_queue *fq, unsigned int tag) ··· 619 566 break; 620 567 } 621 568 } 622 - 569 + 623 570 static void blk_mq_check_expired(struct 
blk_mq_hw_ctx *hctx, 624 571 struct request *rq, void *priv, bool reserved) 625 572 { 626 573 struct blk_mq_timeout_data *data = priv; 627 574 628 - if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) 575 + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { 576 + /* 577 + * If a request wasn't started before the queue was 578 + * marked dying, kill it here or it'll go unnoticed. 579 + */ 580 + if (unlikely(blk_queue_dying(rq->q))) { 581 + rq->errors = -EIO; 582 + blk_mq_complete_request(rq); 583 + } 584 + return; 585 + } 586 + if (rq->cmd_flags & REQ_NO_TIMEOUT) 629 587 return; 630 588 631 589 if (time_after_eq(jiffies, rq->deadline)) { ··· 1665 1601 hctx->queue = q; 1666 1602 hctx->queue_num = hctx_idx; 1667 1603 hctx->flags = set->flags; 1668 - hctx->cmd_size = set->cmd_size; 1669 1604 1670 1605 blk_mq_init_cpu_notifier(&hctx->cpu_notifier, 1671 1606 blk_mq_hctx_notify, hctx);
+1
block/blk-mq.h
··· 32 32 void blk_mq_clone_flush_request(struct request *flush_rq, 33 33 struct request *orig_rq); 34 34 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 35 + void blk_mq_wake_waiters(struct request_queue *q); 35 36 36 37 /* 37 38 * CPU hotplug helpers
+3
block/blk-timeout.c
··· 190 190 struct request_queue *q = req->q; 191 191 unsigned long expiry; 192 192 193 + if (req->cmd_flags & REQ_NO_TIMEOUT) 194 + return; 195 + 193 196 /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ 194 197 if (!q->mq_ops && !q->rq_timed_out_fn) 195 198 return;
+3
crypto/af_alg.c
··· 455 455 { 456 456 struct af_alg_completion *completion = req->data; 457 457 458 + if (err == -EINPROGRESS) 459 + return; 460 + 458 461 completion->err = err; 459 462 complete(&completion->completion); 460 463 }
+4 -2
drivers/Makefile
··· 50 50 obj-y += tty/ 51 51 obj-y += char/ 52 52 53 - # gpu/ comes after char for AGP vs DRM startup 53 + # iommu/ comes before gpu as gpu are using iommu controllers 54 + obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ 55 + 56 + # gpu/ comes after char for AGP vs DRM startup and after iommu 54 57 obj-y += gpu/ 55 58 56 59 obj-$(CONFIG_CONNECTOR) += connector/ ··· 144 141 145 142 obj-$(CONFIG_MAILBOX) += mailbox/ 146 143 obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ 147 - obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ 148 144 obj-$(CONFIG_REMOTEPROC) += remoteproc/ 149 145 obj-$(CONFIG_RPMSG) += rpmsg/ 150 146
+14 -11
drivers/acpi/acpi_processor.c
··· 170 170 acpi_status status; 171 171 int ret; 172 172 173 - if (pr->apic_id == -1) 173 + if (pr->phys_id == -1) 174 174 return -ENODEV; 175 175 176 176 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); ··· 180 180 cpu_maps_update_begin(); 181 181 cpu_hotplug_begin(); 182 182 183 - ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id); 183 + ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id); 184 184 if (ret) 185 185 goto out; 186 186 187 187 ret = arch_register_cpu(pr->id); 188 188 if (ret) { 189 - acpi_unmap_lsapic(pr->id); 189 + acpi_unmap_cpu(pr->id); 190 190 goto out; 191 191 } 192 192 ··· 215 215 union acpi_object object = { 0 }; 216 216 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 217 217 struct acpi_processor *pr = acpi_driver_data(device); 218 - int apic_id, cpu_index, device_declaration = 0; 218 + int phys_id, cpu_index, device_declaration = 0; 219 219 acpi_status status = AE_OK; 220 220 static int cpu0_initialized; 221 221 unsigned long long value; ··· 262 262 pr->acpi_id = value; 263 263 } 264 264 265 - apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); 266 - if (apic_id < 0) 267 - acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); 268 - pr->apic_id = apic_id; 265 + phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); 266 + if (phys_id < 0) 267 + acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); 268 + pr->phys_id = phys_id; 269 269 270 - cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); 270 + cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id); 271 271 if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { 272 272 cpu0_initialized = 1; 273 - /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 273 + /* 274 + * Handle UP system running SMP kernel, with no CPU 275 + * entry in MADT 276 + */ 274 277 if ((cpu_index == -1) && (num_online_cpus() == 1)) 275 278 cpu_index = 0; 276 279 } ··· 461 458 462 459 /* Remove the CPU. 
*/ 463 460 arch_unregister_cpu(pr->id); 464 - acpi_unmap_lsapic(pr->id); 461 + acpi_unmap_cpu(pr->id); 465 462 466 463 cpu_hotplug_done(); 467 464 cpu_maps_update_done();
+1 -1
drivers/acpi/device_pm.c
··· 257 257 258 258 device->power.state = ACPI_STATE_UNKNOWN; 259 259 if (!acpi_device_is_present(device)) 260 - return 0; 260 + return -ENXIO; 261 261 262 262 result = acpi_device_get_power(device, &state); 263 263 if (result)
+7 -4
drivers/acpi/int340x_thermal.c
··· 14 14 15 15 #include "internal.h" 16 16 17 - #define DO_ENUMERATION 0x01 17 + #define INT3401_DEVICE 0X01 18 18 static const struct acpi_device_id int340x_thermal_device_ids[] = { 19 - {"INT3400", DO_ENUMERATION }, 20 - {"INT3401"}, 19 + {"INT3400"}, 20 + {"INT3401", INT3401_DEVICE}, 21 21 {"INT3402"}, 22 22 {"INT3403"}, 23 23 {"INT3404"}, ··· 34 34 const struct acpi_device_id *id) 35 35 { 36 36 #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) 37 - if (id->driver_data == DO_ENUMERATION) 37 + acpi_create_platform_device(adev); 38 + #elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) 39 + /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ 40 + if (id->driver_data == INT3401_DEVICE) 38 41 acpi_create_platform_device(adev); 39 42 #endif 40 43 return 1;
+28 -28
drivers/acpi/processor_core.c
··· 69 69 unsigned long madt_end, entry; 70 70 static struct acpi_table_madt *madt; 71 71 static int read_madt; 72 - int apic_id = -1; 72 + int phys_id = -1; /* CPU hardware ID */ 73 73 74 74 if (!read_madt) { 75 75 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, ··· 79 79 } 80 80 81 81 if (!madt) 82 - return apic_id; 82 + return phys_id; 83 83 84 84 entry = (unsigned long)madt; 85 85 madt_end = entry + madt->header.length; ··· 91 91 struct acpi_subtable_header *header = 92 92 (struct acpi_subtable_header *)entry; 93 93 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { 94 - if (!map_lapic_id(header, acpi_id, &apic_id)) 94 + if (!map_lapic_id(header, acpi_id, &phys_id)) 95 95 break; 96 96 } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { 97 - if (!map_x2apic_id(header, type, acpi_id, &apic_id)) 97 + if (!map_x2apic_id(header, type, acpi_id, &phys_id)) 98 98 break; 99 99 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { 100 - if (!map_lsapic_id(header, type, acpi_id, &apic_id)) 100 + if (!map_lsapic_id(header, type, acpi_id, &phys_id)) 101 101 break; 102 102 } 103 103 entry += header->length; 104 104 } 105 - return apic_id; 105 + return phys_id; 106 106 } 107 107 108 108 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) ··· 110 110 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 111 111 union acpi_object *obj; 112 112 struct acpi_subtable_header *header; 113 - int apic_id = -1; 113 + int phys_id = -1; 114 114 115 115 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 116 116 goto exit; ··· 126 126 127 127 header = (struct acpi_subtable_header *)obj->buffer.pointer; 128 128 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) 129 - map_lapic_id(header, acpi_id, &apic_id); 129 + map_lapic_id(header, acpi_id, &phys_id); 130 130 else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) 131 - map_lsapic_id(header, type, acpi_id, &apic_id); 131 + map_lsapic_id(header, type, acpi_id, &phys_id); 132 132 else if 
(header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) 133 - map_x2apic_id(header, type, acpi_id, &apic_id); 133 + map_x2apic_id(header, type, acpi_id, &phys_id); 134 134 135 135 exit: 136 136 kfree(buffer.pointer); 137 - return apic_id; 137 + return phys_id; 138 138 } 139 139 140 - int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id) 140 + int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) 141 141 { 142 - int apic_id; 142 + int phys_id; 143 143 144 - apic_id = map_mat_entry(handle, type, acpi_id); 145 - if (apic_id == -1) 146 - apic_id = map_madt_entry(type, acpi_id); 144 + phys_id = map_mat_entry(handle, type, acpi_id); 145 + if (phys_id == -1) 146 + phys_id = map_madt_entry(type, acpi_id); 147 147 148 - return apic_id; 148 + return phys_id; 149 149 } 150 150 151 - int acpi_map_cpuid(int apic_id, u32 acpi_id) 151 + int acpi_map_cpuid(int phys_id, u32 acpi_id) 152 152 { 153 153 #ifdef CONFIG_SMP 154 154 int i; 155 155 #endif 156 156 157 - if (apic_id == -1) { 157 + if (phys_id == -1) { 158 158 /* 159 159 * On UP processor, there is no _MAT or MADT table. 160 - * So above apic_id is always set to -1. 160 + * So above phys_id is always set to -1. 161 161 * 162 162 * BIOS may define multiple CPU handles even for UP processor. 163 163 * For example, ··· 170 170 * Processor (CPU3, 0x03, 0x00000410, 0x06) {} 171 171 * } 172 172 * 173 - * Ignores apic_id and always returns 0 for the processor 173 + * Ignores phys_id and always returns 0 for the processor 174 174 * handle with acpi id 0 if nr_cpu_ids is 1. 175 175 * This should be the case if SMP tables are not found. 176 176 * Return -1 for other CPU's handle. 
··· 178 178 if (nr_cpu_ids <= 1 && acpi_id == 0) 179 179 return acpi_id; 180 180 else 181 - return apic_id; 181 + return phys_id; 182 182 } 183 183 184 184 #ifdef CONFIG_SMP 185 185 for_each_possible_cpu(i) { 186 - if (cpu_physical_id(i) == apic_id) 186 + if (cpu_physical_id(i) == phys_id) 187 187 return i; 188 188 } 189 189 #else 190 190 /* In UP kernel, only processor 0 is valid */ 191 - if (apic_id == 0) 192 - return apic_id; 191 + if (phys_id == 0) 192 + return phys_id; 193 193 #endif 194 194 return -1; 195 195 } 196 196 197 197 int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) 198 198 { 199 - int apic_id; 199 + int phys_id; 200 200 201 - apic_id = acpi_get_apicid(handle, type, acpi_id); 201 + phys_id = acpi_get_phys_id(handle, type, acpi_id); 202 202 203 - return acpi_map_cpuid(apic_id, acpi_id); 203 + return acpi_map_cpuid(phys_id, acpi_id); 204 204 } 205 205 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-2
drivers/acpi/processor_idle.c
··· 985 985 state->flags = 0; 986 986 switch (cx->type) { 987 987 case ACPI_STATE_C1: 988 - if (cx->entry_method != ACPI_CSTATE_FFH) 989 - state->flags |= CPUIDLE_FLAG_TIME_INVALID; 990 988 991 989 state->enter = acpi_idle_enter_c1; 992 990 state->enter_dead = acpi_idle_play_dead;
+8 -5
drivers/acpi/scan.c
··· 1001 1001 if (device->wakeup.flags.valid) 1002 1002 acpi_power_resources_list_free(&device->wakeup.resources); 1003 1003 1004 - if (!device->flags.power_manageable) 1004 + if (!device->power.flags.power_resources) 1005 1005 return; 1006 1006 1007 1007 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { ··· 1744 1744 device->power.flags.power_resources) 1745 1745 device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; 1746 1746 1747 - if (acpi_bus_init_power(device)) { 1748 - acpi_free_power_resources_lists(device); 1747 + if (acpi_bus_init_power(device)) 1749 1748 device->flags.power_manageable = 0; 1750 - } 1751 1749 } 1752 1750 1753 1751 static void acpi_bus_get_flags(struct acpi_device *device) ··· 2369 2371 /* Skip devices that are not present. */ 2370 2372 if (!acpi_device_is_present(device)) { 2371 2373 device->flags.visited = false; 2374 + device->flags.power_manageable = 0; 2372 2375 return; 2373 2376 } 2374 2377 if (device->handler) 2375 2378 goto ok; 2376 2379 2377 2380 if (!device->flags.initialized) { 2378 - acpi_bus_update_power(device, NULL); 2381 + device->flags.power_manageable = 2382 + device->power.states[ACPI_STATE_D0].flags.valid; 2383 + if (acpi_bus_init_power(device)) 2384 + device->flags.power_manageable = 0; 2385 + 2379 2386 device->flags.initialized = true; 2380 2387 } 2381 2388 device->flags.visited = false;
+27
drivers/acpi/video.c
··· 505 505 DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), 506 506 }, 507 507 }, 508 + 509 + { 510 + .callback = video_disable_native_backlight, 511 + .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E", 512 + .matches = { 513 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), 514 + DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"), 515 + }, 516 + }, 517 + { 518 + .callback = video_disable_native_backlight, 519 + .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V", 520 + .matches = { 521 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), 522 + DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), 523 + }, 524 + }, 525 + 526 + { 527 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ 528 + .callback = video_disable_native_backlight, 529 + .ident = "Dell XPS15 L521X", 530 + .matches = { 531 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 532 + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), 533 + }, 534 + }, 508 535 {} 509 536 }; 510 537
+2 -1
drivers/base/power/domain.c
··· 2088 2088 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() 2089 2089 * on failure. 2090 2090 */ 2091 - static struct generic_pm_domain *of_genpd_get_from_provider( 2091 + struct generic_pm_domain *of_genpd_get_from_provider( 2092 2092 struct of_phandle_args *genpdspec) 2093 2093 { 2094 2094 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); ··· 2108 2108 2109 2109 return genpd; 2110 2110 } 2111 + EXPORT_SYMBOL_GPL(of_genpd_get_from_provider); 2111 2112 2112 2113 /** 2113 2114 * genpd_dev_pm_detach - Detach a device from its PM domain.
+31 -8
drivers/base/power/opp.c
··· 108 108 /* Lock to allow exclusive modification to the device and opp lists */ 109 109 static DEFINE_MUTEX(dev_opp_list_lock); 110 110 111 + #define opp_rcu_lockdep_assert() \ 112 + do { \ 113 + rcu_lockdep_assert(rcu_read_lock_held() || \ 114 + lockdep_is_held(&dev_opp_list_lock), \ 115 + "Missing rcu_read_lock() or " \ 116 + "dev_opp_list_lock protection"); \ 117 + } while (0) 118 + 111 119 /** 112 120 * find_device_opp() - find device_opp struct using device pointer 113 121 * @dev: device pointer used to lookup device OPPs ··· 216 208 * This function returns the number of available opps if there are any, 217 209 * else returns 0 if none or the corresponding error value. 218 210 * 219 - * Locking: This function must be called under rcu_read_lock(). This function 220 - * internally references two RCU protected structures: device_opp and opp which 221 - * are safe as long as we are under a common RCU locked section. 211 + * Locking: This function takes rcu_read_lock(). 222 212 */ 223 213 int dev_pm_opp_get_opp_count(struct device *dev) 224 214 { ··· 224 218 struct dev_pm_opp *temp_opp; 225 219 int count = 0; 226 220 221 + rcu_read_lock(); 222 + 227 223 dev_opp = find_device_opp(dev); 228 224 if (IS_ERR(dev_opp)) { 229 - int r = PTR_ERR(dev_opp); 230 - dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); 231 - return r; 225 + count = PTR_ERR(dev_opp); 226 + dev_err(dev, "%s: device OPP not found (%d)\n", 227 + __func__, count); 228 + goto out_unlock; 232 229 } 233 230 234 231 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { ··· 239 230 count++; 240 231 } 241 232 233 + out_unlock: 234 + rcu_read_unlock(); 242 235 return count; 243 236 } 244 237 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); ··· 277 266 { 278 267 struct device_opp *dev_opp; 279 268 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 269 + 270 + opp_rcu_lockdep_assert(); 280 271 281 272 dev_opp = find_device_opp(dev); 282 273 if (IS_ERR(dev_opp)) { ··· 325 312 { 326 313 struct 
device_opp *dev_opp; 327 314 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 315 + 316 + opp_rcu_lockdep_assert(); 328 317 329 318 if (!dev || !freq) { 330 319 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); ··· 375 360 { 376 361 struct device_opp *dev_opp; 377 362 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); 363 + 364 + opp_rcu_lockdep_assert(); 378 365 379 366 if (!dev || !freq) { 380 367 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); ··· 800 783 801 784 /* Check for existing list for 'dev' */ 802 785 dev_opp = find_device_opp(dev); 803 - if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev), 804 - PTR_ERR(dev_opp))) 786 + if (IS_ERR(dev_opp)) { 787 + int error = PTR_ERR(dev_opp); 788 + if (error != -ENODEV) 789 + WARN(1, "%s: dev_opp: %d\n", 790 + IS_ERR_OR_NULL(dev) ? 791 + "Invalid device" : dev_name(dev), 792 + error); 805 793 return; 794 + } 806 795 807 796 /* Hold our list modification lock here */ 808 797 mutex_lock(&dev_opp_list_lock);
+1 -1
drivers/block/null_blk.c
··· 530 530 goto out_cleanup_queues; 531 531 532 532 nullb->q = blk_mq_init_queue(&nullb->tag_set); 533 - if (!nullb->q) { 533 + if (IS_ERR(nullb->q)) { 534 534 rv = -ENOMEM; 535 535 goto out_cleanup_tags; 536 536 }
+126 -49
drivers/block/nvme-core.c
··· 215 215 cmd->fn = handler; 216 216 cmd->ctx = ctx; 217 217 cmd->aborted = 0; 218 + blk_mq_start_request(blk_mq_rq_from_pdu(cmd)); 218 219 } 219 220 220 221 /* Special values must be less than 0x1000 */ ··· 432 431 if (unlikely(status)) { 433 432 if (!(status & NVME_SC_DNR || blk_noretry_request(req)) 434 433 && (jiffies - req->start_time) < req->timeout) { 434 + unsigned long flags; 435 + 435 436 blk_mq_requeue_request(req); 436 - blk_mq_kick_requeue_list(req->q); 437 + spin_lock_irqsave(req->q->queue_lock, flags); 438 + if (!blk_queue_stopped(req->q)) 439 + blk_mq_kick_requeue_list(req->q); 440 + spin_unlock_irqrestore(req->q->queue_lock, flags); 437 441 return; 438 442 } 439 443 req->errors = nvme_error_status(status); ··· 670 664 } 671 665 } 672 666 673 - blk_mq_start_request(req); 674 - 675 667 nvme_set_info(cmd, iod, req_completion); 676 668 spin_lock_irq(&nvmeq->q_lock); 677 669 if (req->cmd_flags & REQ_DISCARD) ··· 839 835 if (IS_ERR(req)) 840 836 return PTR_ERR(req); 841 837 838 + req->cmd_flags |= REQ_NO_TIMEOUT; 842 839 cmd_info = blk_mq_rq_to_pdu(req); 843 840 nvme_set_info(cmd_info, req, async_req_completion); 844 841 ··· 1021 1016 struct nvme_command cmd; 1022 1017 1023 1018 if (!nvmeq->qid || cmd_rq->aborted) { 1019 + unsigned long flags; 1020 + 1021 + spin_lock_irqsave(&dev_list_lock, flags); 1024 1022 if (work_busy(&dev->reset_work)) 1025 - return; 1023 + goto out; 1026 1024 list_del_init(&dev->node); 1027 1025 dev_warn(&dev->pci_dev->dev, 1028 1026 "I/O %d QID %d timeout, reset controller\n", 1029 1027 req->tag, nvmeq->qid); 1030 1028 dev->reset_workfn = nvme_reset_failed_dev; 1031 1029 queue_work(nvme_workq, &dev->reset_work); 1030 + out: 1031 + spin_unlock_irqrestore(&dev_list_lock, flags); 1032 1032 return; 1033 1033 } 1034 1034 ··· 1074 1064 void *ctx; 1075 1065 nvme_completion_fn fn; 1076 1066 struct nvme_cmd_info *cmd; 1077 - static struct nvme_completion cqe = { 1078 - .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), 1079 - }; 1067 + 
struct nvme_completion cqe; 1068 + 1069 + if (!blk_mq_request_started(req)) 1070 + return; 1080 1071 1081 1072 cmd = blk_mq_rq_to_pdu(req); 1082 1073 1083 1074 if (cmd->ctx == CMD_CTX_CANCELLED) 1084 1075 return; 1076 + 1077 + if (blk_queue_dying(req->q)) 1078 + cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); 1079 + else 1080 + cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); 1081 + 1085 1082 1086 1083 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", 1087 1084 req->tag, nvmeq->qid); ··· 1101 1084 struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); 1102 1085 struct nvme_queue *nvmeq = cmd->nvmeq; 1103 1086 1104 - dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, 1105 - nvmeq->qid); 1106 - if (nvmeq->dev->initialized) 1107 - nvme_abort_req(req); 1108 - 1109 1087 /* 1110 1088 * The aborted req will be completed on receiving the abort req. 1111 1089 * We enable the timer again. If hit twice, it'll cause a device reset, 1112 1090 * as the device then is in a faulty state. 1113 1091 */ 1114 - return BLK_EH_RESET_TIMER; 1092 + int ret = BLK_EH_RESET_TIMER; 1093 + 1094 + dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, 1095 + nvmeq->qid); 1096 + 1097 + spin_lock_irq(&nvmeq->q_lock); 1098 + if (!nvmeq->dev->initialized) { 1099 + /* 1100 + * Force cancelled command frees the request, which requires we 1101 + * return BLK_EH_NOT_HANDLED. 
1102 + */ 1103 + nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); 1104 + ret = BLK_EH_NOT_HANDLED; 1105 + } else 1106 + nvme_abort_req(req); 1107 + spin_unlock_irq(&nvmeq->q_lock); 1108 + 1109 + return ret; 1115 1110 } 1116 1111 1117 1112 static void nvme_free_queue(struct nvme_queue *nvmeq) ··· 1160 1131 */ 1161 1132 static int nvme_suspend_queue(struct nvme_queue *nvmeq) 1162 1133 { 1163 - int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; 1134 + int vector; 1164 1135 1165 1136 spin_lock_irq(&nvmeq->q_lock); 1137 + if (nvmeq->cq_vector == -1) { 1138 + spin_unlock_irq(&nvmeq->q_lock); 1139 + return 1; 1140 + } 1141 + vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; 1166 1142 nvmeq->dev->online_queues--; 1143 + nvmeq->cq_vector = -1; 1167 1144 spin_unlock_irq(&nvmeq->q_lock); 1168 1145 1169 1146 irq_set_affinity_hint(vector, NULL); ··· 1204 1169 adapter_delete_sq(dev, qid); 1205 1170 adapter_delete_cq(dev, qid); 1206 1171 } 1172 + if (!qid && dev->admin_q) 1173 + blk_mq_freeze_queue_start(dev->admin_q); 1207 1174 nvme_clear_queue(nvmeq); 1208 1175 } 1209 1176 1210 1177 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1211 - int depth, int vector) 1178 + int depth) 1212 1179 { 1213 1180 struct device *dmadev = &dev->pci_dev->dev; 1214 1181 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); ··· 1236 1199 nvmeq->cq_phase = 1; 1237 1200 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1238 1201 nvmeq->q_depth = depth; 1239 - nvmeq->cq_vector = vector; 1240 1202 nvmeq->qid = qid; 1241 1203 dev->queue_count++; 1242 1204 dev->queues[qid] = nvmeq; ··· 1280 1244 struct nvme_dev *dev = nvmeq->dev; 1281 1245 int result; 1282 1246 1247 + nvmeq->cq_vector = qid - 1; 1283 1248 result = adapter_alloc_cq(dev, qid, nvmeq); 1284 1249 if (result < 0) 1285 1250 return result; ··· 1392 1355 .timeout = nvme_timeout, 1393 1356 }; 1394 1357 1358 + static void nvme_dev_remove_admin(struct nvme_dev *dev) 1359 + { 1360 + if 
(dev->admin_q && !blk_queue_dying(dev->admin_q)) { 1361 + blk_cleanup_queue(dev->admin_q); 1362 + blk_mq_free_tag_set(&dev->admin_tagset); 1363 + } 1364 + } 1365 + 1395 1366 static int nvme_alloc_admin_tags(struct nvme_dev *dev) 1396 1367 { 1397 1368 if (!dev->admin_q) { ··· 1415 1370 return -ENOMEM; 1416 1371 1417 1372 dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); 1418 - if (!dev->admin_q) { 1373 + if (IS_ERR(dev->admin_q)) { 1419 1374 blk_mq_free_tag_set(&dev->admin_tagset); 1420 1375 return -ENOMEM; 1421 1376 } 1422 - } 1377 + if (!blk_get_queue(dev->admin_q)) { 1378 + nvme_dev_remove_admin(dev); 1379 + return -ENODEV; 1380 + } 1381 + } else 1382 + blk_mq_unfreeze_queue(dev->admin_q); 1423 1383 1424 1384 return 0; 1425 - } 1426 - 1427 - static void nvme_free_admin_tags(struct nvme_dev *dev) 1428 - { 1429 - if (dev->admin_q) 1430 - blk_mq_free_tag_set(&dev->admin_tagset); 1431 1385 } 1432 1386 1433 1387 static int nvme_configure_admin_queue(struct nvme_dev *dev) ··· 1460 1416 1461 1417 nvmeq = dev->queues[0]; 1462 1418 if (!nvmeq) { 1463 - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); 1419 + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1464 1420 if (!nvmeq) 1465 1421 return -ENOMEM; 1466 1422 } ··· 1483 1439 if (result) 1484 1440 goto free_nvmeq; 1485 1441 1486 - result = nvme_alloc_admin_tags(dev); 1442 + nvmeq->cq_vector = 0; 1443 + result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1487 1444 if (result) 1488 1445 goto free_nvmeq; 1489 1446 1490 - result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1491 - if (result) 1492 - goto free_tags; 1493 - 1494 1447 return result; 1495 1448 1496 - free_tags: 1497 - nvme_free_admin_tags(dev); 1498 1449 free_nvmeq: 1499 1450 nvme_free_queues(dev, 0); 1500 1451 return result; ··· 1983 1944 unsigned i; 1984 1945 1985 1946 for (i = dev->queue_count; i <= dev->max_qid; i++) 1986 - if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) 1947 + if (!nvme_alloc_queue(dev, i, dev->q_depth)) 1987 1948 break; 
1988 1949 1989 1950 for (i = dev->online_queues; i <= dev->queue_count - 1; i++) ··· 2274 2235 break; 2275 2236 if (!schedule_timeout(ADMIN_TIMEOUT) || 2276 2237 fatal_signal_pending(current)) { 2238 + /* 2239 + * Disable the controller first since we can't trust it 2240 + * at this point, but leave the admin queue enabled 2241 + * until all queue deletion requests are flushed. 2242 + * FIXME: This may take a while if there are more h/w 2243 + * queues than admin tags. 2244 + */ 2277 2245 set_current_state(TASK_RUNNING); 2278 - 2279 2246 nvme_disable_ctrl(dev, readq(&dev->bar->cap)); 2280 - nvme_disable_queue(dev, 0); 2281 - 2282 - send_sig(SIGKILL, dq->worker->task, 1); 2247 + nvme_clear_queue(dev->queues[0]); 2283 2248 flush_kthread_worker(dq->worker); 2249 + nvme_disable_queue(dev, 0); 2284 2250 return; 2285 2251 } 2286 2252 } ··· 2362 2318 { 2363 2319 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, 2364 2320 cmdinfo.work); 2365 - allow_signal(SIGKILL); 2366 2321 if (nvme_delete_sq(nvmeq)) 2367 2322 nvme_del_queue_end(nvmeq); 2368 2323 } ··· 2419 2376 kthread_stop(tmp); 2420 2377 } 2421 2378 2379 + static void nvme_freeze_queues(struct nvme_dev *dev) 2380 + { 2381 + struct nvme_ns *ns; 2382 + 2383 + list_for_each_entry(ns, &dev->namespaces, list) { 2384 + blk_mq_freeze_queue_start(ns->queue); 2385 + 2386 + spin_lock(ns->queue->queue_lock); 2387 + queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); 2388 + spin_unlock(ns->queue->queue_lock); 2389 + 2390 + blk_mq_cancel_requeue_work(ns->queue); 2391 + blk_mq_stop_hw_queues(ns->queue); 2392 + } 2393 + } 2394 + 2395 + static void nvme_unfreeze_queues(struct nvme_dev *dev) 2396 + { 2397 + struct nvme_ns *ns; 2398 + 2399 + list_for_each_entry(ns, &dev->namespaces, list) { 2400 + queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); 2401 + blk_mq_unfreeze_queue(ns->queue); 2402 + blk_mq_start_stopped_hw_queues(ns->queue, true); 2403 + blk_mq_kick_requeue_list(ns->queue); 2404 + } 2405 + } 2406 + 2422 
2407 static void nvme_dev_shutdown(struct nvme_dev *dev) 2423 2408 { 2424 2409 int i; ··· 2455 2384 dev->initialized = 0; 2456 2385 nvme_dev_list_remove(dev); 2457 2386 2458 - if (dev->bar) 2387 + if (dev->bar) { 2388 + nvme_freeze_queues(dev); 2459 2389 csts = readl(&dev->bar->csts); 2390 + } 2460 2391 if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { 2461 2392 for (i = dev->queue_count - 1; i >= 0; i--) { 2462 2393 struct nvme_queue *nvmeq = dev->queues[i]; ··· 2473 2400 nvme_dev_unmap(dev); 2474 2401 } 2475 2402 2476 - static void nvme_dev_remove_admin(struct nvme_dev *dev) 2477 - { 2478 - if (dev->admin_q && !blk_queue_dying(dev->admin_q)) 2479 - blk_cleanup_queue(dev->admin_q); 2480 - } 2481 - 2482 2403 static void nvme_dev_remove(struct nvme_dev *dev) 2483 2404 { 2484 2405 struct nvme_ns *ns; ··· 2480 2413 list_for_each_entry(ns, &dev->namespaces, list) { 2481 2414 if (ns->disk->flags & GENHD_FL_UP) 2482 2415 del_gendisk(ns->disk); 2483 - if (!blk_queue_dying(ns->queue)) 2416 + if (!blk_queue_dying(ns->queue)) { 2417 + blk_mq_abort_requeue_list(ns->queue); 2484 2418 blk_cleanup_queue(ns->queue); 2419 + } 2485 2420 } 2486 2421 } 2487 2422 ··· 2564 2495 nvme_free_namespaces(dev); 2565 2496 nvme_release_instance(dev); 2566 2497 blk_mq_free_tag_set(&dev->tagset); 2498 + blk_put_queue(dev->admin_q); 2567 2499 kfree(dev->queues); 2568 2500 kfree(dev->entry); 2569 2501 kfree(dev); ··· 2661 2591 } 2662 2592 2663 2593 nvme_init_queue(dev->queues[0], 0); 2594 + result = nvme_alloc_admin_tags(dev); 2595 + if (result) 2596 + goto disable; 2664 2597 2665 2598 result = nvme_setup_io_queues(dev); 2666 2599 if (result) 2667 - goto disable; 2600 + goto free_tags; 2668 2601 2669 2602 nvme_set_irq_hints(dev); 2670 2603 2671 2604 return result; 2672 2605 2606 + free_tags: 2607 + nvme_dev_remove_admin(dev); 2673 2608 disable: 2674 2609 nvme_disable_queue(dev, 0); 2675 2610 nvme_dev_list_remove(dev); ··· 2714 2639 dev->reset_workfn = nvme_remove_disks; 2715 2640 
queue_work(nvme_workq, &dev->reset_work); 2716 2641 spin_unlock(&dev_list_lock); 2642 + } else { 2643 + nvme_unfreeze_queues(dev); 2644 + nvme_set_irq_hints(dev); 2717 2645 } 2718 2646 dev->initialized = 1; 2719 2647 return 0; ··· 2854 2776 pci_set_drvdata(pdev, NULL); 2855 2777 flush_work(&dev->reset_work); 2856 2778 misc_deregister(&dev->miscdev); 2857 - nvme_dev_remove(dev); 2858 2779 nvme_dev_shutdown(dev); 2780 + nvme_dev_remove(dev); 2859 2781 nvme_dev_remove_admin(dev); 2860 2782 nvme_free_queues(dev, 0); 2861 - nvme_free_admin_tags(dev); 2862 2783 nvme_release_prp_pools(dev); 2863 2784 kref_put(&dev->kref, nvme_free_dev); 2864 2785 }
+1 -1
drivers/block/virtio_blk.c
··· 638 638 goto out_put_disk; 639 639 640 640 q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); 641 - if (!q) { 641 + if (IS_ERR(q)) { 642 642 err = -ENOMEM; 643 643 goto out_free_tags; 644 644 }
+3
drivers/bus/arm-cci.c
··· 1312 1312 if (!np) 1313 1313 return -ENODEV; 1314 1314 1315 + if (!of_device_is_available(np)) 1316 + return -ENODEV; 1317 + 1315 1318 cci_config = of_match_node(arm_cci_matches, np)->data; 1316 1319 if (!cci_config) 1317 1320 return -ENODEV;
+2 -1
drivers/char/ipmi/ipmi_ssif.c
··· 969 969 970 970 do_gettimeofday(&t); 971 971 pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", 972 - msg->data[0], msg->data[1], t.tv_sec, t.tv_usec); 972 + msg->data[0], msg->data[1], 973 + (long) t.tv_sec, (long) t.tv_usec); 973 974 } 974 975 } 975 976
+27
drivers/clk/at91/clk-slow.c
··· 70 70 71 71 #define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw) 72 72 73 + static struct clk *slow_clk; 73 74 74 75 static int clk_slow_osc_prepare(struct clk_hw *hw) 75 76 { ··· 358 357 clk = clk_register(NULL, &slowck->hw); 359 358 if (IS_ERR(clk)) 360 359 kfree(slowck); 360 + else 361 + slow_clk = clk; 361 362 362 363 return clk; 363 364 } ··· 436 433 clk = clk_register(NULL, &slowck->hw); 437 434 if (IS_ERR(clk)) 438 435 kfree(slowck); 436 + else 437 + slow_clk = clk; 439 438 440 439 return clk; 441 440 } ··· 470 465 471 466 of_clk_add_provider(np, of_clk_src_simple_get, clk); 472 467 } 468 + 469 + /* 470 + * FIXME: All slow clk users are not properly claiming it (get + prepare + 471 + * enable) before using it. 472 + * If all users properly claiming this clock decide that they don't need it 473 + * anymore (or are removed), it is disabled while faulty users are still 474 + * requiring it, and the system hangs. 475 + * Prevent this clock from being disabled until all users are properly 476 + * requesting it. 477 + * Once this is done we should remove this function and the slow_clk variable. 478 + */ 479 + static int __init of_at91_clk_slow_retain(void) 480 + { 481 + if (!slow_clk) 482 + return 0; 483 + 484 + __clk_get(slow_clk); 485 + clk_prepare_enable(slow_clk); 486 + 487 + return 0; 488 + } 489 + arch_initcall(of_at91_clk_slow_retain);
-1
drivers/clk/berlin/bg2q.c
··· 285 285 { "pbridge", "perif", 15, CLK_IGNORE_UNUSED }, 286 286 { "sdio", "perif", 16, CLK_IGNORE_UNUSED }, 287 287 { "nfc", "perif", 18 }, 288 - { "smemc", "perif", 19 }, 289 288 { "pcie", "perif", 22 }, 290 289 }; 291 290
+1 -1
drivers/clk/clk-ppc-corenet.c
··· 291 291 {} 292 292 }; 293 293 294 - static struct platform_driver ppc_corenet_clk_driver __initdata = { 294 + static struct platform_driver ppc_corenet_clk_driver = { 295 295 .driver = { 296 296 .name = "ppc_corenet_clock", 297 297 .of_match_table = ppc_clk_ids,
+1 -1
drivers/clk/clk.c
··· 1366 1366 new_rate = clk->ops->determine_rate(clk->hw, rate, 1367 1367 &best_parent_rate, 1368 1368 &parent_hw); 1369 - parent = parent_hw->clk; 1369 + parent = parent_hw ? parent_hw->clk : NULL; 1370 1370 } else if (clk->ops->round_rate) { 1371 1371 new_rate = clk->ops->round_rate(clk->hw, rate, 1372 1372 &best_parent_rate);
+6 -4
drivers/clk/rockchip/clk-cpu.c
··· 124 124 { 125 125 const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; 126 126 unsigned long alt_prate, alt_div; 127 + unsigned long flags; 127 128 128 129 alt_prate = clk_get_rate(cpuclk->alt_parent); 129 130 130 - spin_lock(cpuclk->lock); 131 + spin_lock_irqsave(cpuclk->lock, flags); 131 132 132 133 /* 133 134 * If the old parent clock speed is less than the clock speed ··· 165 164 cpuclk->reg_base + reg_data->core_reg); 166 165 } 167 166 168 - spin_unlock(cpuclk->lock); 167 + spin_unlock_irqrestore(cpuclk->lock, flags); 169 168 return 0; 170 169 } 171 170 ··· 174 173 { 175 174 const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; 176 175 const struct rockchip_cpuclk_rate_table *rate; 176 + unsigned long flags; 177 177 178 178 rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate); 179 179 if (!rate) { ··· 183 181 return -EINVAL; 184 182 } 185 183 186 - spin_lock(cpuclk->lock); 184 + spin_lock_irqsave(cpuclk->lock, flags); 187 185 188 186 if (ndata->old_rate < ndata->new_rate) 189 187 rockchip_cpuclk_set_dividers(cpuclk, rate); ··· 203 201 if (ndata->old_rate > ndata->new_rate) 204 202 rockchip_cpuclk_set_dividers(cpuclk, rate); 205 203 206 - spin_unlock(cpuclk->lock); 204 + spin_unlock_irqrestore(cpuclk->lock, flags); 207 205 return 0; 208 206 } 209 207
+20 -7
drivers/clk/rockchip/clk-rk3188.c
··· 210 210 PNAME(mux_mac_p) = { "gpll", "dpll" }; 211 211 PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" }; 212 212 213 + static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = { 214 + [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0), 215 + RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates), 216 + [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4), 217 + RK2928_MODE_CON, 4, 4, 0, NULL), 218 + [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8), 219 + RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates), 220 + [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12), 221 + RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates), 222 + }; 223 + 213 224 static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = { 214 225 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0), 215 226 RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates), ··· 438 427 /* hclk_peri gates */ 439 428 GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS), 440 429 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS), 441 - GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS), 430 + GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS), 442 431 GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS), 443 432 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS), 444 - GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS), 445 - GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS), 433 + GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS), 434 + GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS), 446 435 GATE(HCLK_HSADC, 
"hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS), 447 436 GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS), 448 437 GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS), ··· 603 592 GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS), 604 593 GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), 605 594 606 - GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS), 595 + GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED, 596 + RK2928_CLKGATE_CON(5), 14, GFLAGS), 607 597 608 598 GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS), 609 599 ··· 692 680 GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), 693 681 GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS), 694 682 695 - GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS), 683 + GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED, 684 + RK2928_CLKGATE_CON(7), 3, GFLAGS), 696 685 GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS), 697 686 698 687 GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS), ··· 748 735 static void __init rk3066a_clk_init(struct device_node *np) 749 736 { 750 737 rk3188_common_clk_init(np); 751 - rockchip_clk_register_plls(rk3188_pll_clks, 752 - ARRAY_SIZE(rk3188_pll_clks), 738 + rockchip_clk_register_plls(rk3066_pll_clks, 739 + ARRAY_SIZE(rk3066_pll_clks), 753 740 RK3066_GRF_SOC_STATUS); 754 741 rockchip_clk_register_branches(rk3066a_clk_branches, 755 742 ARRAY_SIZE(rk3066a_clk_branches));
+14 -14
drivers/clk/rockchip/clk-rk3288.c
··· 145 145 } 146 146 147 147 static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = { 148 - RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4), 149 - RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4), 150 - RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4), 151 - RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4), 152 - RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4), 153 - RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4), 154 - RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4), 155 - RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4), 156 - RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4), 157 - RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4), 158 - RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4), 159 - RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4), 160 - RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4), 161 - RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4), 148 + RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3), 149 + RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3), 150 + RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3), 151 + RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3), 152 + RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3), 153 + RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3), 154 + RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3), 155 + RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3), 156 + RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3), 157 + RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3), 158 + RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3), 159 + RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3), 160 + RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3), 161 + RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3), 162 162 }; 163 163 164 164 static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
+1 -1
drivers/clocksource/arm_arch_timer.c
··· 462 462 463 463 /* Register the CP15 based counter if we have one */ 464 464 if (type & ARCH_CP15_TIMER) { 465 - if (arch_timer_use_virtual) 465 + if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual) 466 466 arch_timer_read_counter = arch_counter_get_cntvct; 467 467 else 468 468 arch_timer_read_counter = arch_counter_get_cntpct;
+11
drivers/cpufreq/cpufreq-dt.c
··· 211 211 /* OPPs might be populated at runtime, don't check for error here */ 212 212 of_init_opp_table(cpu_dev); 213 213 214 + /* 215 + * But we need OPP table to function so if it is not there let's 216 + * give platform code chance to provide it for us. 217 + */ 218 + ret = dev_pm_opp_get_opp_count(cpu_dev); 219 + if (ret <= 0) { 220 + pr_debug("OPP table is not ready, deferring probe\n"); 221 + ret = -EPROBE_DEFER; 222 + goto out_free_opp; 223 + } 224 + 214 225 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 215 226 if (!priv) { 216 227 ret = -ENOMEM;
+6
drivers/cpufreq/cpufreq.c
··· 2028 2028 /* Don't start any governor operations if we are entering suspend */ 2029 2029 if (cpufreq_suspended) 2030 2030 return 0; 2031 + /* 2032 + * Governor might not be initiated here if ACPI _PPC changed 2033 + * notification happened, so check it. 2034 + */ 2035 + if (!policy->governor) 2036 + return -EINVAL; 2031 2037 2032 2038 if (policy->governor->max_transition_latency && 2033 2039 policy->cpuinfo.transition_latency >
+1 -6
drivers/cpuidle/governors/ladder.c
··· 79 79 80 80 last_state = &ldev->states[last_idx]; 81 81 82 - if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) { 83 - last_residency = cpuidle_get_last_residency(dev) - \ 84 - drv->states[last_idx].exit_latency; 85 - } 86 - else 87 - last_residency = last_state->threshold.promotion_time + 1; 82 + last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency; 88 83 89 84 /* consider promotion */ 90 85 if (last_idx < drv->state_count - 1 &&
+12 -17
drivers/cpuidle/governors/menu.c
··· 396 396 * power state and occurrence of the wakeup event. 397 397 * 398 398 * If the entered idle state didn't support residency measurements, 399 - * we are basically lost in the dark how much time passed. 400 - * As a compromise, assume we slept for the whole expected time. 399 + * we use them anyway if they are short, and if long, 400 + * truncate to the whole expected time. 401 401 * 402 402 * Any measured amount of time will include the exit latency. 403 403 * Since we are interested in when the wakeup begun, not when it ··· 405 405 * the measured amount of time is less than the exit latency, 406 406 * assume the state was never reached and the exit latency is 0. 407 407 */ 408 - if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) { 409 - /* Use timer value as is */ 408 + 409 + /* measured value */ 410 + measured_us = cpuidle_get_last_residency(dev); 411 + 412 + /* Deduct exit latency */ 413 + if (measured_us > target->exit_latency) 414 + measured_us -= target->exit_latency; 415 + 416 + /* Make sure our coefficients do not exceed unity */ 417 + if (measured_us > data->next_timer_us) 410 418 measured_us = data->next_timer_us; 411 - 412 - } else { 413 - /* Use measured value */ 414 - measured_us = cpuidle_get_last_residency(dev); 415 - 416 - /* Deduct exit latency */ 417 - if (measured_us > target->exit_latency) 418 - measured_us -= target->exit_latency; 419 - 420 - /* Make sure our coefficients do not exceed unity */ 421 - if (measured_us > data->next_timer_us) 422 - measured_us = data->next_timer_us; 423 - } 424 419 425 420 /* Update our correction ratio */ 426 421 new_factor = data->correction_factor[data->bucket];
-2
drivers/dma/dw/core.c
··· 1505 1505 dw->regs = chip->regs; 1506 1506 chip->dw = dw; 1507 1507 1508 - pm_runtime_enable(chip->dev); 1509 1508 pm_runtime_get_sync(chip->dev); 1510 1509 1511 1510 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); ··· 1702 1703 } 1703 1704 1704 1705 pm_runtime_put_sync_suspend(chip->dev); 1705 - pm_runtime_disable(chip->dev); 1706 1706 return 0; 1707 1707 } 1708 1708 EXPORT_SYMBOL_GPL(dw_dma_remove);
+5
drivers/dma/dw/platform.c
··· 15 15 #include <linux/module.h> 16 16 #include <linux/device.h> 17 17 #include <linux/clk.h> 18 + #include <linux/pm_runtime.h> 18 19 #include <linux/platform_device.h> 19 20 #include <linux/dmaengine.h> 20 21 #include <linux/dma-mapping.h> ··· 186 185 if (err) 187 186 return err; 188 187 188 + pm_runtime_enable(&pdev->dev); 189 + 189 190 err = dw_dma_probe(chip, pdata); 190 191 if (err) 191 192 goto err_dw_dma_probe; ··· 208 205 return 0; 209 206 210 207 err_dw_dma_probe: 208 + pm_runtime_disable(&pdev->dev); 211 209 clk_disable_unprepare(chip->clk); 212 210 return err; 213 211 } ··· 221 217 of_dma_controller_free(pdev->dev.of_node); 222 218 223 219 dw_dma_remove(chip); 220 + pm_runtime_disable(&pdev->dev); 224 221 clk_disable_unprepare(chip->clk); 225 222 226 223 return 0;
+67 -89
drivers/gpio/gpio-dln2.c
··· 47 47 48 48 #define DLN2_GPIO_MAX_PINS 32 49 49 50 - struct dln2_irq_work { 51 - struct work_struct work; 52 - struct dln2_gpio *dln2; 53 - int pin; 54 - int type; 55 - }; 56 - 57 50 struct dln2_gpio { 58 51 struct platform_device *pdev; 59 52 struct gpio_chip gpio; ··· 57 64 */ 58 65 DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS); 59 66 60 - DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS); 61 - DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS); 62 - DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS); 63 - struct dln2_irq_work *irq_work; 67 + /* active IRQs - not synced to hardware */ 68 + DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS); 69 + /* active IRQS - synced to hardware */ 70 + DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS); 71 + int irq_type[DLN2_GPIO_MAX_PINS]; 72 + struct mutex irq_lock; 64 73 }; 65 74 66 75 struct dln2_gpio_pin { ··· 136 141 return !!ret; 137 142 } 138 143 139 - static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, 140 - unsigned int pin, int value) 144 + static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, 145 + unsigned int pin, int value) 141 146 { 142 147 struct dln2_gpio_pin_val req = { 143 148 .pin = cpu_to_le16(pin), 144 149 .value = value, 145 150 }; 146 151 147 - dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, 148 - sizeof(req)); 152 + return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, 153 + sizeof(req)); 149 154 } 150 155 151 156 #define DLN2_GPIO_DIRECTION_IN 0 ··· 262 267 static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset, 263 268 int value) 264 269 { 270 + struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio); 271 + int ret; 272 + 273 + ret = dln2_gpio_pin_set_out_val(dln2, offset, value); 274 + if (ret < 0) 275 + return ret; 276 + 265 277 return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT); 266 278 } 267 279 ··· 299 297 &req, sizeof(req)); 300 298 } 301 299 302 - static void dln2_irq_work(struct work_struct 
*w) 303 - { 304 - struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work); 305 - struct dln2_gpio *dln2 = iw->dln2; 306 - u8 type = iw->type & DLN2_GPIO_EVENT_MASK; 307 - 308 - if (test_bit(iw->pin, dln2->irqs_enabled)) 309 - dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0); 310 - else 311 - dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0); 312 - } 313 - 314 - static void dln2_irq_enable(struct irq_data *irqd) 300 + static void dln2_irq_unmask(struct irq_data *irqd) 315 301 { 316 302 struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 317 303 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 318 304 int pin = irqd_to_hwirq(irqd); 319 305 320 - set_bit(pin, dln2->irqs_enabled); 321 - schedule_work(&dln2->irq_work[pin].work); 322 - } 323 - 324 - static void dln2_irq_disable(struct irq_data *irqd) 325 - { 326 - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 327 - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 328 - int pin = irqd_to_hwirq(irqd); 329 - 330 - clear_bit(pin, dln2->irqs_enabled); 331 - schedule_work(&dln2->irq_work[pin].work); 306 + set_bit(pin, dln2->unmasked_irqs); 332 307 } 333 308 334 309 static void dln2_irq_mask(struct irq_data *irqd) ··· 314 335 struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 315 336 int pin = irqd_to_hwirq(irqd); 316 337 317 - set_bit(pin, dln2->irqs_masked); 318 - } 319 - 320 - static void dln2_irq_unmask(struct irq_data *irqd) 321 - { 322 - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 323 - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 324 - struct device *dev = dln2->gpio.dev; 325 - int pin = irqd_to_hwirq(irqd); 326 - 327 - if (test_and_clear_bit(pin, dln2->irqs_pending)) { 328 - int irq; 329 - 330 - irq = irq_find_mapping(dln2->gpio.irqdomain, pin); 331 - if (!irq) { 332 - dev_err(dev, "pin %d not mapped to IRQ\n", pin); 333 - return; 334 - } 335 - 336 - generic_handle_irq(irq); 337 - } 338 + 
clear_bit(pin, dln2->unmasked_irqs); 338 339 } 339 340 340 341 static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) ··· 325 366 326 367 switch (type) { 327 368 case IRQ_TYPE_LEVEL_HIGH: 328 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH; 369 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH; 329 370 break; 330 371 case IRQ_TYPE_LEVEL_LOW: 331 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW; 372 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW; 332 373 break; 333 374 case IRQ_TYPE_EDGE_BOTH: 334 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE; 375 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE; 335 376 break; 336 377 case IRQ_TYPE_EDGE_RISING: 337 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING; 378 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING; 338 379 break; 339 380 case IRQ_TYPE_EDGE_FALLING: 340 - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING; 381 + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING; 341 382 break; 342 383 default: 343 384 return -EINVAL; ··· 346 387 return 0; 347 388 } 348 389 390 + static void dln2_irq_bus_lock(struct irq_data *irqd) 391 + { 392 + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 393 + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 394 + 395 + mutex_lock(&dln2->irq_lock); 396 + } 397 + 398 + static void dln2_irq_bus_unlock(struct irq_data *irqd) 399 + { 400 + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); 401 + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); 402 + int pin = irqd_to_hwirq(irqd); 403 + int enabled, unmasked; 404 + unsigned type; 405 + int ret; 406 + 407 + enabled = test_bit(pin, dln2->enabled_irqs); 408 + unmasked = test_bit(pin, dln2->unmasked_irqs); 409 + 410 + if (enabled != unmasked) { 411 + if (unmasked) { 412 + type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK; 413 + set_bit(pin, dln2->enabled_irqs); 414 + } else { 415 + type = DLN2_GPIO_EVENT_NONE; 416 + clear_bit(pin, dln2->enabled_irqs); 
417 + } 418 + 419 + ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0); 420 + if (ret) 421 + dev_err(dln2->gpio.dev, "failed to set event\n"); 422 + } 423 + 424 + mutex_unlock(&dln2->irq_lock); 425 + } 426 + 349 427 static struct irq_chip dln2_gpio_irqchip = { 350 428 .name = "dln2-irq", 351 - .irq_enable = dln2_irq_enable, 352 - .irq_disable = dln2_irq_disable, 353 429 .irq_mask = dln2_irq_mask, 354 430 .irq_unmask = dln2_irq_unmask, 355 431 .irq_set_type = dln2_irq_set_type, 432 + .irq_bus_lock = dln2_irq_bus_lock, 433 + .irq_bus_sync_unlock = dln2_irq_bus_unlock, 356 434 }; 357 435 358 436 static void dln2_gpio_event(struct platform_device *pdev, u16 echo, ··· 421 425 return; 422 426 } 423 427 424 - if (!test_bit(pin, dln2->irqs_enabled)) 425 - return; 426 - if (test_bit(pin, dln2->irqs_masked)) { 427 - set_bit(pin, dln2->irqs_pending); 428 - return; 429 - } 430 - 431 - switch (dln2->irq_work[pin].type) { 428 + switch (dln2->irq_type[pin]) { 432 429 case DLN2_GPIO_EVENT_CHANGE_RISING: 433 430 if (event->value) 434 431 generic_handle_irq(irq); ··· 440 451 struct dln2_gpio *dln2; 441 452 struct device *dev = &pdev->dev; 442 453 int pins; 443 - int i, ret; 454 + int ret; 444 455 445 456 pins = dln2_gpio_get_pin_count(pdev); 446 457 if (pins < 0) { ··· 456 467 if (!dln2) 457 468 return -ENOMEM; 458 469 459 - dln2->irq_work = devm_kcalloc(&pdev->dev, pins, 460 - sizeof(struct dln2_irq_work), GFP_KERNEL); 461 - if (!dln2->irq_work) 462 - return -ENOMEM; 463 - for (i = 0; i < pins; i++) { 464 - INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work); 465 - dln2->irq_work[i].pin = i; 466 - dln2->irq_work[i].dln2 = dln2; 467 - } 470 + mutex_init(&dln2->irq_lock); 468 471 469 472 dln2->pdev = pdev; 470 473 ··· 510 529 static int dln2_gpio_remove(struct platform_device *pdev) 511 530 { 512 531 struct dln2_gpio *dln2 = platform_get_drvdata(pdev); 513 - int i; 514 532 515 533 dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV); 516 - for (i = 0; i < dln2->gpio.ngpio; i++) 
517 - flush_work(&dln2->irq_work[i].work); 518 534 gpiochip_remove(&dln2->gpio); 519 535 520 536 return 0;
+2 -1
drivers/gpio/gpio-grgpio.c
··· 441 441 err = gpiochip_add(gc); 442 442 if (err) { 443 443 dev_err(&ofdev->dev, "Could not add gpiochip\n"); 444 - irq_domain_remove(priv->domain); 444 + if (priv->domain) 445 + irq_domain_remove(priv->domain); 445 446 return err; 446 447 } 447 448
+1 -1
drivers/gpu/drm/Makefile
··· 37 37 obj-$(CONFIG_DRM_TTM) += ttm/ 38 38 obj-$(CONFIG_DRM_TDFX) += tdfx/ 39 39 obj-$(CONFIG_DRM_R128) += r128/ 40 + obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ 40 41 obj-$(CONFIG_DRM_RADEON)+= radeon/ 41 42 obj-$(CONFIG_DRM_MGA) += mga/ 42 43 obj-$(CONFIG_DRM_I810) += i810/ ··· 68 67 obj-y += i2c/ 69 68 obj-y += panel/ 70 69 obj-y += bridge/ 71 - obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
+169 -149
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 31 31 #include <uapi/linux/kfd_ioctl.h> 32 32 #include <linux/time.h> 33 33 #include <linux/mm.h> 34 - #include <linux/uaccess.h> 35 34 #include <uapi/asm-generic/mman-common.h> 36 35 #include <asm/processor.h> 37 36 #include "kfd_priv.h" ··· 126 127 return 0; 127 128 } 128 129 129 - static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, 130 - void __user *arg) 130 + static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, 131 + void *data) 131 132 { 132 - struct kfd_ioctl_get_version_args args; 133 + struct kfd_ioctl_get_version_args *args = data; 133 134 int err = 0; 134 135 135 - args.major_version = KFD_IOCTL_MAJOR_VERSION; 136 - args.minor_version = KFD_IOCTL_MINOR_VERSION; 137 - 138 - if (copy_to_user(arg, &args, sizeof(args))) 139 - err = -EFAULT; 136 + args->major_version = KFD_IOCTL_MAJOR_VERSION; 137 + args->minor_version = KFD_IOCTL_MINOR_VERSION; 140 138 141 139 return err; 142 140 } ··· 217 221 return 0; 218 222 } 219 223 220 - static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, 221 - void __user *arg) 224 + static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, 225 + void *data) 222 226 { 223 - struct kfd_ioctl_create_queue_args args; 227 + struct kfd_ioctl_create_queue_args *args = data; 224 228 struct kfd_dev *dev; 225 229 int err = 0; 226 230 unsigned int queue_id; ··· 229 233 230 234 memset(&q_properties, 0, sizeof(struct queue_properties)); 231 235 232 - if (copy_from_user(&args, arg, sizeof(args))) 233 - return -EFAULT; 234 - 235 236 pr_debug("kfd: creating queue ioctl\n"); 236 237 237 - err = set_queue_properties_from_user(&q_properties, &args); 238 + err = set_queue_properties_from_user(&q_properties, args); 238 239 if (err) 239 240 return err; 240 241 241 - dev = kfd_device_by_id(args.gpu_id); 242 + dev = kfd_device_by_id(args->gpu_id); 242 243 if (dev == NULL) 243 244 return -EINVAL; 244 245 ··· 243 250 244 251 pdd = kfd_bind_process_to_device(dev, 
p); 245 252 if (IS_ERR(pdd)) { 246 - err = PTR_ERR(pdd); 253 + err = -ESRCH; 247 254 goto err_bind_process; 248 255 } 249 256 ··· 256 263 if (err != 0) 257 264 goto err_create_queue; 258 265 259 - args.queue_id = queue_id; 266 + args->queue_id = queue_id; 260 267 261 268 /* Return gpu_id as doorbell offset for mmap usage */ 262 - args.doorbell_offset = args.gpu_id << PAGE_SHIFT; 263 - 264 - if (copy_to_user(arg, &args, sizeof(args))) { 265 - err = -EFAULT; 266 - goto err_copy_args_out; 267 - } 269 + args->doorbell_offset = args->gpu_id << PAGE_SHIFT; 268 270 269 271 mutex_unlock(&p->mutex); 270 272 271 - pr_debug("kfd: queue id %d was created successfully\n", args.queue_id); 273 + pr_debug("kfd: queue id %d was created successfully\n", args->queue_id); 272 274 273 275 pr_debug("ring buffer address == 0x%016llX\n", 274 - args.ring_base_address); 276 + args->ring_base_address); 275 277 276 278 pr_debug("read ptr address == 0x%016llX\n", 277 - args.read_pointer_address); 279 + args->read_pointer_address); 278 280 279 281 pr_debug("write ptr address == 0x%016llX\n", 280 - args.write_pointer_address); 282 + args->write_pointer_address); 281 283 282 284 return 0; 283 285 284 - err_copy_args_out: 285 - pqm_destroy_queue(&p->pqm, queue_id); 286 286 err_create_queue: 287 287 err_bind_process: 288 288 mutex_unlock(&p->mutex); ··· 283 297 } 284 298 285 299 static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, 286 - void __user *arg) 300 + void *data) 287 301 { 288 302 int retval; 289 - struct kfd_ioctl_destroy_queue_args args; 290 - 291 - if (copy_from_user(&args, arg, sizeof(args))) 292 - return -EFAULT; 303 + struct kfd_ioctl_destroy_queue_args *args = data; 293 304 294 305 pr_debug("kfd: destroying queue id %d for PASID %d\n", 295 - args.queue_id, 306 + args->queue_id, 296 307 p->pasid); 297 308 298 309 mutex_lock(&p->mutex); 299 310 300 - retval = pqm_destroy_queue(&p->pqm, args.queue_id); 311 + retval = pqm_destroy_queue(&p->pqm, args->queue_id); 
301 312 302 313 mutex_unlock(&p->mutex); 303 314 return retval; 304 315 } 305 316 306 317 static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, 307 - void __user *arg) 318 + void *data) 308 319 { 309 320 int retval; 310 - struct kfd_ioctl_update_queue_args args; 321 + struct kfd_ioctl_update_queue_args *args = data; 311 322 struct queue_properties properties; 312 323 313 - if (copy_from_user(&args, arg, sizeof(args))) 314 - return -EFAULT; 315 - 316 - if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { 324 + if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { 317 325 pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); 318 326 return -EINVAL; 319 327 } 320 328 321 - if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) { 329 + if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { 322 330 pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); 323 331 return -EINVAL; 324 332 } 325 333 326 - if ((args.ring_base_address) && 334 + if ((args->ring_base_address) && 327 335 (!access_ok(VERIFY_WRITE, 328 - (const void __user *) args.ring_base_address, 336 + (const void __user *) args->ring_base_address, 329 337 sizeof(uint64_t)))) { 330 338 pr_err("kfd: can't access ring base address\n"); 331 339 return -EFAULT; 332 340 } 333 341 334 - if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) { 342 + if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { 335 343 pr_err("kfd: ring size must be a power of 2 or 0\n"); 336 344 return -EINVAL; 337 345 } 338 346 339 - properties.queue_address = args.ring_base_address; 340 - properties.queue_size = args.ring_size; 341 - properties.queue_percent = args.queue_percentage; 342 - properties.priority = args.queue_priority; 347 + properties.queue_address = args->ring_base_address; 348 + properties.queue_size = args->ring_size; 349 + properties.queue_percent = args->queue_percentage; 350 + properties.priority = args->queue_priority; 343 351 344 352 
pr_debug("kfd: updating queue id %d for PASID %d\n", 345 - args.queue_id, p->pasid); 353 + args->queue_id, p->pasid); 346 354 347 355 mutex_lock(&p->mutex); 348 356 349 - retval = pqm_update_queue(&p->pqm, args.queue_id, &properties); 357 + retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); 350 358 351 359 mutex_unlock(&p->mutex); 352 360 353 361 return retval; 354 362 } 355 363 356 - static long kfd_ioctl_set_memory_policy(struct file *filep, 357 - struct kfd_process *p, void __user *arg) 364 + static int kfd_ioctl_set_memory_policy(struct file *filep, 365 + struct kfd_process *p, void *data) 358 366 { 359 - struct kfd_ioctl_set_memory_policy_args args; 367 + struct kfd_ioctl_set_memory_policy_args *args = data; 360 368 struct kfd_dev *dev; 361 369 int err = 0; 362 370 struct kfd_process_device *pdd; 363 371 enum cache_policy default_policy, alternate_policy; 364 372 365 - if (copy_from_user(&args, arg, sizeof(args))) 366 - return -EFAULT; 367 - 368 - if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT 369 - && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 373 + if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT 374 + && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 370 375 return -EINVAL; 371 376 } 372 377 373 - if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT 374 - && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 378 + if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT 379 + && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { 375 380 return -EINVAL; 376 381 } 377 382 378 - dev = kfd_device_by_id(args.gpu_id); 383 + dev = kfd_device_by_id(args->gpu_id); 379 384 if (dev == NULL) 380 385 return -EINVAL; 381 386 ··· 374 397 375 398 pdd = kfd_bind_process_to_device(dev, p); 376 399 if (IS_ERR(pdd)) { 377 - err = PTR_ERR(pdd); 400 + err = -ESRCH; 378 401 goto out; 379 402 } 380 403 381 - default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT) 404 + 
default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) 382 405 ? cache_policy_coherent : cache_policy_noncoherent; 383 406 384 407 alternate_policy = 385 - (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) 408 + (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) 386 409 ? cache_policy_coherent : cache_policy_noncoherent; 387 410 388 411 if (!dev->dqm->set_cache_memory_policy(dev->dqm, 389 412 &pdd->qpd, 390 413 default_policy, 391 414 alternate_policy, 392 - (void __user *)args.alternate_aperture_base, 393 - args.alternate_aperture_size)) 415 + (void __user *)args->alternate_aperture_base, 416 + args->alternate_aperture_size)) 394 417 err = -EINVAL; 395 418 396 419 out: ··· 399 422 return err; 400 423 } 401 424 402 - static long kfd_ioctl_get_clock_counters(struct file *filep, 403 - struct kfd_process *p, void __user *arg) 425 + static int kfd_ioctl_get_clock_counters(struct file *filep, 426 + struct kfd_process *p, void *data) 404 427 { 405 - struct kfd_ioctl_get_clock_counters_args args; 428 + struct kfd_ioctl_get_clock_counters_args *args = data; 406 429 struct kfd_dev *dev; 407 430 struct timespec time; 408 431 409 - if (copy_from_user(&args, arg, sizeof(args))) 410 - return -EFAULT; 411 - 412 - dev = kfd_device_by_id(args.gpu_id); 432 + dev = kfd_device_by_id(args->gpu_id); 413 433 if (dev == NULL) 414 434 return -EINVAL; 415 435 416 436 /* Reading GPU clock counter from KGD */ 417 - args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); 437 + args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); 418 438 419 439 /* No access to rdtsc. 
Using raw monotonic time */ 420 440 getrawmonotonic(&time); 421 - args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time); 441 + args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time); 422 442 423 443 get_monotonic_boottime(&time); 424 - args.system_clock_counter = (uint64_t)timespec_to_ns(&time); 444 + args->system_clock_counter = (uint64_t)timespec_to_ns(&time); 425 445 426 446 /* Since the counter is in nano-seconds we use 1GHz frequency */ 427 - args.system_clock_freq = 1000000000; 428 - 429 - if (copy_to_user(arg, &args, sizeof(args))) 430 - return -EFAULT; 447 + args->system_clock_freq = 1000000000; 431 448 432 449 return 0; 433 450 } 434 451 435 452 436 453 static int kfd_ioctl_get_process_apertures(struct file *filp, 437 - struct kfd_process *p, void __user *arg) 454 + struct kfd_process *p, void *data) 438 455 { 439 - struct kfd_ioctl_get_process_apertures_args args; 456 + struct kfd_ioctl_get_process_apertures_args *args = data; 440 457 struct kfd_process_device_apertures *pAperture; 441 458 struct kfd_process_device *pdd; 442 459 443 460 dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); 444 461 445 - if (copy_from_user(&args, arg, sizeof(args))) 446 - return -EFAULT; 447 - 448 - args.num_of_nodes = 0; 462 + args->num_of_nodes = 0; 449 463 450 464 mutex_lock(&p->mutex); 451 465 ··· 445 477 /* Run over all pdd of the process */ 446 478 pdd = kfd_get_first_process_device_data(p); 447 479 do { 448 - pAperture = &args.process_apertures[args.num_of_nodes]; 480 + pAperture = 481 + &args->process_apertures[args->num_of_nodes]; 449 482 pAperture->gpu_id = pdd->dev->id; 450 483 pAperture->lds_base = pdd->lds_base; 451 484 pAperture->lds_limit = pdd->lds_limit; ··· 456 487 pAperture->scratch_limit = pdd->scratch_limit; 457 488 458 489 dev_dbg(kfd_device, 459 - "node id %u\n", args.num_of_nodes); 490 + "node id %u\n", args->num_of_nodes); 460 491 dev_dbg(kfd_device, 461 492 "gpu id %u\n", pdd->dev->id); 462 493 dev_dbg(kfd_device, ··· 472 503 
dev_dbg(kfd_device, 473 504 "scratch_limit %llX\n", pdd->scratch_limit); 474 505 475 - args.num_of_nodes++; 506 + args->num_of_nodes++; 476 507 } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && 477 - (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS)); 508 + (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); 478 509 } 479 510 480 511 mutex_unlock(&p->mutex); 481 512 482 - if (copy_to_user(arg, &args, sizeof(args))) 483 - return -EFAULT; 484 - 485 513 return 0; 486 514 } 515 + 516 + #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ 517 + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} 518 + 519 + /** Ioctl table */ 520 + static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { 521 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, 522 + kfd_ioctl_get_version, 0), 523 + 524 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, 525 + kfd_ioctl_create_queue, 0), 526 + 527 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, 528 + kfd_ioctl_destroy_queue, 0), 529 + 530 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, 531 + kfd_ioctl_set_memory_policy, 0), 532 + 533 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, 534 + kfd_ioctl_get_clock_counters, 0), 535 + 536 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, 537 + kfd_ioctl_get_process_apertures, 0), 538 + 539 + AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, 540 + kfd_ioctl_update_queue, 0), 541 + }; 542 + 543 + #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) 487 544 488 545 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 489 546 { 490 547 struct kfd_process *process; 491 - long err = -EINVAL; 548 + amdkfd_ioctl_t *func; 549 + const struct amdkfd_ioctl_desc *ioctl = NULL; 550 + unsigned int nr = _IOC_NR(cmd); 551 + char stack_kdata[128]; 552 + char *kdata = NULL; 553 + unsigned int usize, asize; 554 + int retcode = -EINVAL; 492 555 493 - dev_dbg(kfd_device, 494 - "ioctl cmd 0x%x (#%d), arg 0x%lx\n", 495 - cmd, _IOC_NR(cmd), arg); 556 + if (nr >= 
AMDKFD_CORE_IOCTL_COUNT) 557 + goto err_i1; 558 + 559 + if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { 560 + u32 amdkfd_size; 561 + 562 + ioctl = &amdkfd_ioctls[nr]; 563 + 564 + amdkfd_size = _IOC_SIZE(ioctl->cmd); 565 + usize = asize = _IOC_SIZE(cmd); 566 + if (amdkfd_size > asize) 567 + asize = amdkfd_size; 568 + 569 + cmd = ioctl->cmd; 570 + } else 571 + goto err_i1; 572 + 573 + dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); 496 574 497 575 process = kfd_get_process(current); 498 - if (IS_ERR(process)) 499 - return PTR_ERR(process); 500 - 501 - switch (cmd) { 502 - case KFD_IOC_GET_VERSION: 503 - err = kfd_ioctl_get_version(filep, process, (void __user *)arg); 504 - break; 505 - case KFD_IOC_CREATE_QUEUE: 506 - err = kfd_ioctl_create_queue(filep, process, 507 - (void __user *)arg); 508 - break; 509 - 510 - case KFD_IOC_DESTROY_QUEUE: 511 - err = kfd_ioctl_destroy_queue(filep, process, 512 - (void __user *)arg); 513 - break; 514 - 515 - case KFD_IOC_SET_MEMORY_POLICY: 516 - err = kfd_ioctl_set_memory_policy(filep, process, 517 - (void __user *)arg); 518 - break; 519 - 520 - case KFD_IOC_GET_CLOCK_COUNTERS: 521 - err = kfd_ioctl_get_clock_counters(filep, process, 522 - (void __user *)arg); 523 - break; 524 - 525 - case KFD_IOC_GET_PROCESS_APERTURES: 526 - err = kfd_ioctl_get_process_apertures(filep, process, 527 - (void __user *)arg); 528 - break; 529 - 530 - case KFD_IOC_UPDATE_QUEUE: 531 - err = kfd_ioctl_update_queue(filep, process, 532 - (void __user *)arg); 533 - break; 534 - 535 - default: 536 - dev_err(kfd_device, 537 - "unknown ioctl cmd 0x%x, arg 0x%lx)\n", 538 - cmd, arg); 539 - err = -EINVAL; 540 - break; 576 + if (IS_ERR(process)) { 577 + dev_dbg(kfd_device, "no process\n"); 578 + goto err_i1; 541 579 } 542 580 543 - if (err < 0) 544 - dev_err(kfd_device, 545 - "ioctl error %ld for ioctl cmd 0x%x (#%d)\n", 546 - err, cmd, _IOC_NR(cmd)); 581 + /* Do not trust userspace, use our own definition */ 582 + func = 
ioctl->func; 547 583 548 - return err; 584 + if (unlikely(!func)) { 585 + dev_dbg(kfd_device, "no function\n"); 586 + retcode = -EINVAL; 587 + goto err_i1; 588 + } 589 + 590 + if (cmd & (IOC_IN | IOC_OUT)) { 591 + if (asize <= sizeof(stack_kdata)) { 592 + kdata = stack_kdata; 593 + } else { 594 + kdata = kmalloc(asize, GFP_KERNEL); 595 + if (!kdata) { 596 + retcode = -ENOMEM; 597 + goto err_i1; 598 + } 599 + } 600 + if (asize > usize) 601 + memset(kdata + usize, 0, asize - usize); 602 + } 603 + 604 + if (cmd & IOC_IN) { 605 + if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { 606 + retcode = -EFAULT; 607 + goto err_i1; 608 + } 609 + } else if (cmd & IOC_OUT) { 610 + memset(kdata, 0, usize); 611 + } 612 + 613 + retcode = func(filep, process, kdata); 614 + 615 + if (cmd & IOC_OUT) 616 + if (copy_to_user((void __user *)arg, kdata, usize) != 0) 617 + retcode = -EFAULT; 618 + 619 + err_i1: 620 + if (!ioctl) 621 + dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", 622 + task_pid_nr(current), cmd, nr); 623 + 624 + if (kdata != stack_kdata) 625 + kfree(kdata); 626 + 627 + if (retcode) 628 + dev_dbg(kfd_device, "ret = %d\n", retcode); 629 + 630 + return retcode; 549 631 } 550 632 551 633 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
+26 -2
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 161 161 { 162 162 int bit = qpd->vmid - KFD_VMID_START_OFFSET; 163 163 164 + /* Release the vmid mapping */ 165 + set_pasid_vmid_mapping(dqm, 0, qpd->vmid); 166 + 164 167 set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); 165 168 qpd->vmid = 0; 166 169 q->properties.vmid = 0; ··· 275 272 return retval; 276 273 } 277 274 275 + pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n", 276 + q->pipe, 277 + q->queue); 278 + 279 + retval = mqd->load_mqd(mqd, q->mqd, q->pipe, 280 + q->queue, q->properties.write_ptr); 281 + if (retval != 0) { 282 + deallocate_hqd(dqm, q); 283 + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); 284 + return retval; 285 + } 286 + 278 287 return 0; 279 288 } 280 289 ··· 335 320 { 336 321 int retval; 337 322 struct mqd_manager *mqd; 323 + bool prev_active = false; 338 324 339 325 BUG_ON(!dqm || !q || !q->mqd); 340 326 ··· 346 330 return -ENOMEM; 347 331 } 348 332 349 - retval = mqd->update_mqd(mqd, q->mqd, &q->properties); 350 333 if (q->properties.is_active == true) 334 + prev_active = true; 335 + 336 + /* 337 + * 338 + * check active state vs. the previous state 339 + * and modify counter accordingly 340 + */ 341 + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); 342 + if ((q->properties.is_active == true) && (prev_active == false)) 351 343 dqm->queue_count++; 352 - else 344 + else if ((q->properties.is_active == false) && (prev_active == true)) 353 345 dqm->queue_count--; 354 346 355 347 if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
··· 184 184 uint32_t queue_id) 185 185 { 186 186 187 - return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address, 187 + return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, 188 188 pipe_id, queue_id); 189 189 190 190 }
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
··· 32 32 { 33 33 pasid_limit = max_num_of_processes; 34 34 35 - pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL); 35 + pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); 36 36 if (!pasid_bitmap) 37 37 return -ENOMEM; 38 38
+18
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 463 463 bool is_32bit_user_mode; 464 464 }; 465 465 466 + /** 467 + * Ioctl function type. 468 + * 469 + * \param filep pointer to file structure. 470 + * \param p amdkfd process pointer. 471 + * \param data pointer to arg that was copied from user. 472 + */ 473 + typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p, 474 + void *data); 475 + 476 + struct amdkfd_ioctl_desc { 477 + unsigned int cmd; 478 + int flags; 479 + amdkfd_ioctl_t *func; 480 + unsigned int cmd_drv; 481 + const char *name; 482 + }; 483 + 466 484 void kfd_process_create_wq(void); 467 485 void kfd_process_destroy_wq(void); 468 486 struct kfd_process *kfd_create_process(const struct task_struct *);
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 921 921 uint32_t i = 0; 922 922 923 923 list_for_each_entry(dev, &topology_device_list, list) { 924 - ret = kfd_build_sysfs_node_entry(dev, 0); 924 + ret = kfd_build_sysfs_node_entry(dev, i); 925 925 if (ret < 0) 926 926 return ret; 927 927 i++;
+1 -1
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
··· 183 183 int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 184 184 uint32_t queue_id, uint32_t __user *wptr); 185 185 186 - bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address, 186 + bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, 187 187 uint32_t pipe_id, uint32_t queue_id); 188 188 189 189 int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
-2
drivers/gpu/drm/i915/i915_drv.h
··· 1756 1756 */ 1757 1757 struct workqueue_struct *dp_wq; 1758 1758 1759 - uint32_t bios_vgacntr; 1760 - 1761 1759 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1762 1760 struct { 1763 1761 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
+7 -1
drivers/gpu/drm/i915/i915_gem.c
··· 1048 1048 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1049 1049 struct drm_file *file) 1050 1050 { 1051 + struct drm_i915_private *dev_priv = dev->dev_private; 1051 1052 struct drm_i915_gem_pwrite *args = data; 1052 1053 struct drm_i915_gem_object *obj; 1053 1054 int ret; ··· 1068 1067 return -EFAULT; 1069 1068 } 1070 1069 1070 + intel_runtime_pm_get(dev_priv); 1071 + 1071 1072 ret = i915_mutex_lock_interruptible(dev); 1072 1073 if (ret) 1073 - return ret; 1074 + goto put_rpm; 1074 1075 1075 1076 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1076 1077 if (&obj->base == NULL) { ··· 1124 1121 drm_gem_object_unreference(&obj->base); 1125 1122 unlock: 1126 1123 mutex_unlock(&dev->struct_mutex); 1124 + put_rpm: 1125 + intel_runtime_pm_put(dev_priv); 1126 + 1127 1127 return ret; 1128 1128 } 1129 1129
+2 -4
drivers/gpu/drm/i915/i915_irq.c
··· 3725 3725 if ((iir & flip_pending) == 0) 3726 3726 goto check_page_flip; 3727 3727 3728 - intel_prepare_page_flip(dev, plane); 3729 - 3730 3728 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3731 3729 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3732 3730 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence ··· 3734 3736 if (I915_READ16(ISR) & flip_pending) 3735 3737 goto check_page_flip; 3736 3738 3739 + intel_prepare_page_flip(dev, plane); 3737 3740 intel_finish_page_flip(dev, pipe); 3738 3741 return true; 3739 3742 ··· 3906 3907 if ((iir & flip_pending) == 0) 3907 3908 goto check_page_flip; 3908 3909 3909 - intel_prepare_page_flip(dev, plane); 3910 - 3911 3910 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3912 3911 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3913 3912 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence ··· 3915 3918 if (I915_READ(ISR) & flip_pending) 3916 3919 goto check_page_flip; 3917 3920 3921 + intel_prepare_page_flip(dev, plane); 3918 3922 intel_finish_page_flip(dev, pipe); 3919 3923 return true; 3920 3924
+1 -7
drivers/gpu/drm/i915/intel_display.c
··· 13057 13057 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 13058 13058 udelay(300); 13059 13059 13060 - /* 13061 - * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming 13062 - * from S3 without preserving (some of?) the other bits. 13063 - */ 13064 - I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE); 13060 + I915_WRITE(vga_reg, VGA_DISP_DISABLE); 13065 13061 POSTING_READ(vga_reg); 13066 13062 } 13067 13063 ··· 13142 13146 13143 13147 intel_shared_dpll_init(dev); 13144 13148 13145 - /* save the BIOS value before clobbering it */ 13146 - dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev)); 13147 13149 /* Just disable it once at startup */ 13148 13150 i915_disable_vga(dev); 13149 13151 intel_setup_outputs(dev);
-27
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 615 615 vlv_power_sequencer_reset(dev_priv); 616 616 } 617 617 618 - static void check_power_well_state(struct drm_i915_private *dev_priv, 619 - struct i915_power_well *power_well) 620 - { 621 - bool enabled = power_well->ops->is_enabled(dev_priv, power_well); 622 - 623 - if (power_well->always_on || !i915.disable_power_well) { 624 - if (!enabled) 625 - goto mismatch; 626 - 627 - return; 628 - } 629 - 630 - if (enabled != (power_well->count > 0)) 631 - goto mismatch; 632 - 633 - return; 634 - 635 - mismatch: 636 - WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", 637 - power_well->name, power_well->always_on, enabled, 638 - power_well->count, i915.disable_power_well); 639 - } 640 - 641 618 /** 642 619 * intel_display_power_get - grab a power domain reference 643 620 * @dev_priv: i915 device instance ··· 646 669 power_well->ops->enable(dev_priv, power_well); 647 670 power_well->hw_enabled = true; 648 671 } 649 - 650 - check_power_well_state(dev_priv, power_well); 651 672 } 652 673 653 674 power_domains->domain_use_count[domain]++; ··· 684 709 power_well->hw_enabled = false; 685 710 power_well->ops->disable(dev_priv, power_well); 686 711 } 687 - 688 - check_power_well_state(dev_priv, power_well); 689 712 } 690 713 691 714 mutex_unlock(&power_domains->lock);
+2 -2
drivers/gpu/drm/nouveau/core/core/event.c
··· 26 26 void 27 27 nvkm_event_put(struct nvkm_event *event, u32 types, int index) 28 28 { 29 - BUG_ON(!spin_is_locked(&event->refs_lock)); 29 + assert_spin_locked(&event->refs_lock); 30 30 while (types) { 31 31 int type = __ffs(types); types &= ~(1 << type); 32 32 if (--event->refs[index * event->types_nr + type] == 0) { ··· 39 39 void 40 40 nvkm_event_get(struct nvkm_event *event, u32 types, int index) 41 41 { 42 - BUG_ON(!spin_is_locked(&event->refs_lock)); 42 + assert_spin_locked(&event->refs_lock); 43 43 while (types) { 44 44 int type = __ffs(types); types &= ~(1 << type); 45 45 if (++event->refs[index * event->types_nr + type] == 1) {
+1 -1
drivers/gpu/drm/nouveau/core/core/notify.c
··· 98 98 struct nvkm_event *event = notify->event; 99 99 unsigned long flags; 100 100 101 - BUG_ON(!spin_is_locked(&event->list_lock)); 101 + assert_spin_locked(&event->list_lock); 102 102 BUG_ON(size != notify->size); 103 103 104 104 spin_lock_irqsave(&event->refs_lock, flags);
+33
drivers/gpu/drm/nouveau/core/engine/device/nve0.c
··· 249 249 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 250 250 device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass; 251 251 break; 252 + case 0x106: 253 + device->cname = "GK208B"; 254 + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; 255 + device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; 256 + device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; 257 + device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass; 258 + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; 259 + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 260 + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 261 + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; 262 + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; 263 + device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 264 + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 265 + device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; 266 + device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; 267 + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; 268 + device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; 269 + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 270 + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 271 + device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass; 272 + device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; 273 + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; 274 + device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; 275 + device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; 276 + device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; 277 + device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass; 278 + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 279 + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; 280 + device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass; 281 + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; 282 + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; 283 + 
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 284 + break; 252 285 case 0x108: 253 286 device->cname = "GK208"; 254 287 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+4 -2
drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
··· 44 44 pramin_fini(void *data) 45 45 { 46 46 struct priv *priv = data; 47 - nv_wr32(priv->bios, 0x001700, priv->bar0); 48 - kfree(priv); 47 + if (priv) { 48 + nv_wr32(priv->bios, 0x001700, priv->bar0); 49 + kfree(priv); 50 + } 49 51 } 50 52 51 53 static void *
+51 -14
drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
··· 24 24 25 25 #include "nv50.h" 26 26 27 + struct nvaa_ram_priv { 28 + struct nouveau_ram base; 29 + u64 poller_base; 30 + }; 31 + 27 32 static int 28 33 nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 29 34 struct nouveau_oclass *oclass, void *data, u32 datasize, 30 35 struct nouveau_object **pobject) 31 36 { 32 - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ 33 - const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */ 37 + u32 rsvd_head = ( 256 * 1024); /* vga memory */ 38 + u32 rsvd_tail = (1024 * 1024); /* vbios etc */ 34 39 struct nouveau_fb *pfb = nouveau_fb(parent); 35 - struct nouveau_ram *ram; 40 + struct nvaa_ram_priv *priv; 36 41 int ret; 37 42 38 - ret = nouveau_ram_create(parent, engine, oclass, &ram); 39 - *pobject = nv_object(ram); 43 + ret = nouveau_ram_create(parent, engine, oclass, &priv); 44 + *pobject = nv_object(priv); 40 45 if (ret) 41 46 return ret; 42 47 43 - ram->size = nv_rd32(pfb, 0x10020c); 44 - ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32); 48 + priv->base.type = NV_MEM_TYPE_STOLEN; 49 + priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; 50 + priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12; 45 51 46 - ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) - 47 - (rsvd_head + rsvd_tail), 1); 52 + rsvd_tail += 0x1000; 53 + priv->poller_base = priv->base.size - rsvd_tail; 54 + 55 + ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12, 56 + (priv->base.size - (rsvd_head + rsvd_tail)) >> 12, 57 + 1); 48 58 if (ret) 49 59 return ret; 50 60 51 - ram->type = NV_MEM_TYPE_STOLEN; 52 - ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; 53 - ram->get = nv50_ram_get; 54 - ram->put = nv50_ram_put; 61 + priv->base.get = nv50_ram_get; 62 + priv->base.put = nv50_ram_put; 63 + return 0; 64 + } 65 + 66 + static int 67 + nvaa_ram_init(struct nouveau_object *object) 68 + { 69 + struct nouveau_fb *pfb = nouveau_fb(object); 70 + struct nvaa_ram_priv *priv = (void *)object; 71 
+ int ret; 72 + u64 dniso, hostnb, flush; 73 + 74 + ret = nouveau_ram_init(&priv->base); 75 + if (ret) 76 + return ret; 77 + 78 + dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1; 79 + hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1; 80 + flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1; 81 + 82 + /* Enable NISO poller for various clients and set their associated 83 + * read address, only for MCP77/78 and MCP79/7A. (fd#25701) 84 + */ 85 + nv_wr32(pfb, 0x100c18, dniso); 86 + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001); 87 + nv_wr32(pfb, 0x100c1c, hostnb); 88 + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002); 89 + nv_wr32(pfb, 0x100c24, flush); 90 + nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000); 91 + 55 92 return 0; 56 93 } 57 94 ··· 97 60 .ofuncs = &(struct nouveau_ofuncs) { 98 61 .ctor = nvaa_ram_ctor, 99 62 .dtor = _nouveau_ram_dtor, 100 - .init = _nouveau_ram_init, 63 + .init = nvaa_ram_init, 101 64 .fini = _nouveau_ram_fini, 102 65 }, 103 66 };
-8
drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
··· 24 24 25 25 #include "nv04.h" 26 26 27 - static void 28 - nv4c_mc_msi_rearm(struct nouveau_mc *pmc) 29 - { 30 - struct nv04_mc_priv *priv = (void *)pmc; 31 - nv_wr08(priv, 0x088050, 0xff); 32 - } 33 - 34 27 struct nouveau_oclass * 35 28 nv4c_mc_oclass = &(struct nouveau_mc_oclass) { 36 29 .base.handle = NV_SUBDEV(MC, 0x4c), ··· 34 41 .fini = _nouveau_mc_fini, 35 42 }, 36 43 .intr = nv04_mc_intr, 37 - .msi_rearm = nv4c_mc_msi_rearm, 38 44 }.base;
+3 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1572 1572 * so use the DMA API for them. 1573 1573 */ 1574 1574 if (!nv_device_is_cpu_coherent(device) && 1575 - ttm->caching_state == tt_uncached) 1575 + ttm->caching_state == tt_uncached) { 1576 1576 ttm_dma_unpopulate(ttm_dma, dev->dev); 1577 + return; 1578 + } 1577 1579 1578 1580 #if __OS_HAS_AGP 1579 1581 if (drm->agp.stat == ENABLED) {
+31 -6
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 36 36 nouveau_gem_object_del(struct drm_gem_object *gem) 37 37 { 38 38 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 39 + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 39 40 struct ttm_buffer_object *bo = &nvbo->bo; 41 + struct device *dev = drm->dev->dev; 42 + int ret; 43 + 44 + ret = pm_runtime_get_sync(dev); 45 + if (WARN_ON(ret < 0 && ret != -EACCES)) 46 + return; 40 47 41 48 if (gem->import_attach) 42 49 drm_prime_gem_destroy(gem, nvbo->bo.sg); ··· 53 46 /* reset filp so nouveau_bo_del_ttm() can test for it */ 54 47 gem->filp = NULL; 55 48 ttm_bo_unref(&bo); 49 + 50 + pm_runtime_mark_last_busy(dev); 51 + pm_runtime_put_autosuspend(dev); 56 52 } 57 53 58 54 int ··· 63 53 { 64 54 struct nouveau_cli *cli = nouveau_cli(file_priv); 65 55 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 56 + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 66 57 struct nouveau_vma *vma; 58 + struct device *dev = drm->dev->dev; 67 59 int ret; 68 60 69 61 if (!cli->vm) ··· 83 71 goto out; 84 72 } 85 73 86 - ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); 87 - if (ret) { 88 - kfree(vma); 74 + ret = pm_runtime_get_sync(dev); 75 + if (ret < 0 && ret != -EACCES) 89 76 goto out; 90 - } 77 + 78 + ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); 79 + if (ret) 80 + kfree(vma); 81 + 82 + pm_runtime_mark_last_busy(dev); 83 + pm_runtime_put_autosuspend(dev); 91 84 } else { 92 85 vma->refcount++; 93 86 } ··· 146 129 { 147 130 struct nouveau_cli *cli = nouveau_cli(file_priv); 148 131 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 132 + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 133 + struct device *dev = drm->dev->dev; 149 134 struct nouveau_vma *vma; 150 135 int ret; 151 136 ··· 160 141 161 142 vma = nouveau_bo_vma_find(nvbo, cli->vm); 162 143 if (vma) { 163 - if (--vma->refcount == 0) 164 - nouveau_gem_object_unmap(nvbo, vma); 144 + if (--vma->refcount == 0) { 145 + ret = pm_runtime_get_sync(dev); 146 + if (!WARN_ON(ret < 0 && ret != -EACCES)) { 147 + 
nouveau_gem_object_unmap(nvbo, vma); 148 + pm_runtime_mark_last_busy(dev); 149 + pm_runtime_put_autosuspend(dev); 150 + } 151 + } 165 152 } 166 153 ttm_bo_unreserve(&nvbo->bo); 167 154 }
+4 -4
drivers/gpu/drm/radeon/atombios_crtc.c
··· 1851 1851 return pll; 1852 1852 } 1853 1853 /* otherwise, pick one of the plls */ 1854 - if ((rdev->family == CHIP_KAVERI) || 1855 - (rdev->family == CHIP_KABINI) || 1854 + if ((rdev->family == CHIP_KABINI) || 1856 1855 (rdev->family == CHIP_MULLINS)) { 1857 - /* KB/KV/ML has PPLL1 and PPLL2 */ 1856 + /* KB/ML has PPLL1 and PPLL2 */ 1858 1857 pll_in_use = radeon_get_pll_use_mask(crtc); 1859 1858 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1860 1859 return ATOM_PPLL2; ··· 1862 1863 DRM_ERROR("unable to allocate a PPLL\n"); 1863 1864 return ATOM_PPLL_INVALID; 1864 1865 } else { 1865 - /* CI has PPLL0, PPLL1, and PPLL2 */ 1866 + /* CI/KV has PPLL0, PPLL1, and PPLL2 */ 1866 1867 pll_in_use = radeon_get_pll_use_mask(crtc); 1867 1868 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1868 1869 return ATOM_PPLL2; ··· 2154 2155 case ATOM_PPLL0: 2155 2156 /* disable the ppll */ 2156 2157 if ((rdev->family == CHIP_ARUBA) || 2158 + (rdev->family == CHIP_KAVERI) || 2157 2159 (rdev->family == CHIP_BONAIRE) || 2158 2160 (rdev->family == CHIP_HAWAII)) 2159 2161 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+4
drivers/gpu/drm/radeon/atombios_dp.c
··· 492 492 struct radeon_connector_atom_dig *dig_connector; 493 493 int dp_clock; 494 494 495 + if ((mode->clock > 340000) && 496 + (!radeon_connector_is_dp12_capable(connector))) 497 + return MODE_CLOCK_HIGH; 498 + 495 499 if (!radeon_connector->con_priv) 496 500 return MODE_CLOCK_HIGH; 497 501 dig_connector = radeon_connector->con_priv;
+2
drivers/gpu/drm/radeon/cikd.h
··· 2156 2156 #define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu 2157 2157 #define ATC_VM_APERTURE1_LOW_ADDR 0x3304u 2158 2158 2159 + #define IH_VMID_0_LUT 0x3D40u 2160 + 2159 2161 #endif
+1 -1
drivers/gpu/drm/radeon/dce3_1_afmt.c
··· 103 103 } 104 104 105 105 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); 106 - if (sad_count < 0) { 106 + if (sad_count <= 0) { 107 107 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 108 108 return; 109 109 }
+4 -6
drivers/gpu/drm/radeon/kv_dpm.c
··· 2745 2745 pi->enable_auto_thermal_throttling = true; 2746 2746 pi->disable_nb_ps3_in_battery = false; 2747 2747 if (radeon_bapm == -1) { 2748 - /* There are stability issues reported on with 2749 - * bapm enabled on an asrock system. 2750 - */ 2751 - if (rdev->pdev->subsystem_vendor == 0x1849) 2752 - pi->bapm_enable = false; 2753 - else 2748 + /* only enable bapm on KB, ML by default */ 2749 + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) 2754 2750 pi->bapm_enable = true; 2751 + else 2752 + pi->bapm_enable = false; 2755 2753 } else if (radeon_bapm == 0) { 2756 2754 pi->bapm_enable = false; 2757 2755 } else {
+20 -3
drivers/gpu/drm/radeon/radeon_kfd.c
··· 72 72 static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 73 73 uint32_t queue_id, uint32_t __user *wptr); 74 74 75 - static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, 75 + static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 76 76 uint32_t pipe_id, uint32_t queue_id); 77 77 78 78 static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, ··· 92 92 .init_memory = kgd_init_memory, 93 93 .init_pipeline = kgd_init_pipeline, 94 94 .hqd_load = kgd_hqd_load, 95 - .hqd_is_occupies = kgd_hqd_is_occupies, 95 + .hqd_is_occupied = kgd_hqd_is_occupied, 96 96 .hqd_destroy = kgd_hqd_destroy, 97 97 .get_fw_version = get_fw_version 98 98 }; ··· 101 101 102 102 bool radeon_kfd_init(void) 103 103 { 104 + #if defined(CONFIG_HSA_AMD_MODULE) 104 105 bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*, 105 106 const struct kgd2kfd_calls**); 106 107 ··· 118 117 } 119 118 120 119 return true; 120 + #elif defined(CONFIG_HSA_AMD) 121 + if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) { 122 + kgd2kfd = NULL; 123 + 124 + return false; 125 + } 126 + 127 + return true; 128 + #else 129 + return false; 130 + #endif 121 131 } 122 132 123 133 void radeon_kfd_fini(void) ··· 390 378 cpu_relax(); 391 379 write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid); 392 380 381 + /* Mapping vmid to pasid also for IH block */ 382 + write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t), 383 + pasid_mapping); 384 + 393 385 return 0; 394 386 } 395 387 ··· 533 517 return 0; 534 518 } 535 519 536 - static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, 520 + static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 537 521 uint32_t pipe_id, uint32_t queue_id) 538 522 { 539 523 uint32_t act; ··· 572 556 if (timeout == 0) { 573 557 pr_err("kfd: cp queue preemption time out (%dms)\n", 574 558 temp); 559 + release_queue(kgd); 575 560 return -ETIME; 
576 561 } 577 562 msleep(20);
+1 -1
drivers/gpu/drm/radeon/radeon_state.c
··· 1703 1703 u32 format; 1704 1704 u32 *buffer; 1705 1705 const u8 __user *data; 1706 - int size, dwords, tex_width, blit_width, spitch; 1706 + unsigned int size, dwords, tex_width, blit_width, spitch; 1707 1707 u32 height; 1708 1708 int i; 1709 1709 u32 texpitch, microtile;
+2 -1
drivers/hid/Kconfig
··· 27 27 28 28 config HID_BATTERY_STRENGTH 29 29 bool "Battery level reporting for HID devices" 30 - depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY 30 + depends on HID 31 + select POWER_SUPPLY 31 32 default n 32 33 ---help--- 33 34 This option adds support of reporting battery strength (for HID devices
+1
drivers/hid/hid-core.c
··· 1805 1805 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, 1806 1806 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, 1807 1807 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, 1808 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, 1808 1809 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 1809 1810 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1810 1811 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+1
drivers/hid/hid-ids.h
··· 526 526 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 527 527 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 528 528 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 529 + #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a 529 530 #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 530 531 531 532 #define USB_VENDOR_ID_LABTEC 0x1020
+3
drivers/hid/hid-input.c
··· 312 312 USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), 313 313 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 314 314 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 315 + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), 316 + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 317 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 315 318 USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), 316 319 HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, 317 320 {}
+4
drivers/hid/hid-kye.c
··· 323 323 } 324 324 break; 325 325 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: 326 + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: 326 327 if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { 327 328 rdesc = mousepen_i608x_rdesc_fixed; 328 329 *rsize = sizeof(mousepen_i608x_rdesc_fixed); ··· 416 415 switch (id->product) { 417 416 case USB_DEVICE_ID_KYE_EASYPEN_I405X: 418 417 case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: 418 + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: 419 419 case USB_DEVICE_ID_KYE_EASYPEN_M610X: 420 420 ret = kye_tablet_enable(hdev); 421 421 if (ret) { ··· 447 445 USB_DEVICE_ID_KYE_EASYPEN_I405X) }, 448 446 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 449 447 USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, 448 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 449 + USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, 450 450 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, 451 451 USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 452 452 { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+15 -1
drivers/hid/hid-logitech-dj.c
··· 962 962 963 963 switch (data[0]) { 964 964 case REPORT_ID_DJ_SHORT: 965 + if (size != DJREPORT_SHORT_LENGTH) { 966 + dev_err(&hdev->dev, "DJ report of bad size (%d)", size); 967 + return false; 968 + } 965 969 return logi_dj_dj_event(hdev, report, data, size); 966 970 case REPORT_ID_HIDPP_SHORT: 967 - /* intentional fallthrough */ 971 + if (size != HIDPP_REPORT_SHORT_LENGTH) { 972 + dev_err(&hdev->dev, 973 + "Short HID++ report of bad size (%d)", size); 974 + return false; 975 + } 976 + return logi_dj_hidpp_event(hdev, report, data, size); 968 977 case REPORT_ID_HIDPP_LONG: 978 + if (size != HIDPP_REPORT_LONG_LENGTH) { 979 + dev_err(&hdev->dev, 980 + "Long HID++ report of bad size (%d)", size); 981 + return false; 982 + } 969 983 return logi_dj_hidpp_event(hdev, report, data, size); 970 984 } 971 985
+41
drivers/hid/hid-logitech-hidpp.c
··· 282 282 (report->rap.sub_id == 0x41); 283 283 } 284 284 285 + /** 286 + * hidpp_prefix_name() prefixes the current given name with "Logitech ". 287 + */ 288 + static void hidpp_prefix_name(char **name, int name_length) 289 + { 290 + #define PREFIX_LENGTH 9 /* "Logitech " */ 291 + 292 + int new_length; 293 + char *new_name; 294 + 295 + if (name_length > PREFIX_LENGTH && 296 + strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0) 297 + /* The prefix has is already in the name */ 298 + return; 299 + 300 + new_length = PREFIX_LENGTH + name_length; 301 + new_name = kzalloc(new_length, GFP_KERNEL); 302 + if (!new_name) 303 + return; 304 + 305 + snprintf(new_name, new_length, "Logitech %s", *name); 306 + 307 + kfree(*name); 308 + 309 + *name = new_name; 310 + } 311 + 285 312 /* -------------------------------------------------------------------------- */ 286 313 /* HIDP++ 1.0 commands */ 287 314 /* -------------------------------------------------------------------------- */ ··· 348 321 return NULL; 349 322 350 323 memcpy(name, &response.rap.params[2], len); 324 + 325 + /* include the terminating '\0' */ 326 + hidpp_prefix_name(&name, len + 1); 327 + 351 328 return name; 352 329 } 353 330 ··· 528 497 } 529 498 index += ret; 530 499 } 500 + 501 + /* include the terminating '\0' */ 502 + hidpp_prefix_name(&name, __name_length + 1); 531 503 532 504 return name; 533 505 } ··· 828 794 829 795 switch (data[0]) { 830 796 case 0x02: 797 + if (size < 2) { 798 + hid_err(hdev, "Received HID report of bad size (%d)", 799 + size); 800 + return 1; 801 + } 831 802 if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { 832 803 input_event(wd->input, EV_KEY, BTN_LEFT, 833 804 !!(data[1] & 0x01)); 834 805 input_event(wd->input, EV_KEY, BTN_RIGHT, 835 806 !!(data[1] & 0x02)); 836 807 input_sync(wd->input); 808 + return 0; 837 809 } else { 838 810 if (size < 21) 839 811 return 1; 840 812 return wtp_mouse_raw_xy_event(hidpp, &data[7]); 841 813 } 842 814 case REPORT_ID_HIDPP_LONG: 815 + /* 
size is already checked in hidpp_raw_event. */ 843 816 if ((report->fap.feature_index != wd->mt_feature_index) || 844 817 (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) 845 818 return 1;
+6 -2
drivers/hid/hid-roccat-pyra.c
··· 35 35 static void profile_activated(struct pyra_device *pyra, 36 36 unsigned int new_profile) 37 37 { 38 + if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) 39 + return; 38 40 pyra->actual_profile = new_profile; 39 41 pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; 40 42 } ··· 259 257 if (off != 0 || count != PYRA_SIZE_SETTINGS) 260 258 return -EINVAL; 261 259 262 - mutex_lock(&pyra->pyra_lock); 263 - 264 260 settings = (struct pyra_settings const *)buf; 261 + if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) 262 + return -EINVAL; 263 + 264 + mutex_lock(&pyra->pyra_lock); 265 265 266 266 retval = pyra_set_settings(usb_dev, settings); 267 267 if (retval) {
-5
drivers/hid/i2c-hid/i2c-hid.c
··· 706 706 707 707 static void i2c_hid_stop(struct hid_device *hid) 708 708 { 709 - struct i2c_client *client = hid->driver_data; 710 - struct i2c_hid *ihid = i2c_get_clientdata(client); 711 - 712 709 hid->claimed = 0; 713 - 714 - i2c_hid_free_buffers(ihid); 715 710 } 716 711 717 712 static int i2c_hid_open(struct hid_device *hid)
+1
drivers/hid/usbhid/hid-quirks.c
··· 124 124 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, 125 125 { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, 126 126 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, 127 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT }, 127 128 { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, 128 129 { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, 129 130 { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
+13 -2
drivers/iio/adc/ad799x.c
··· 143 143 case ad7998: 144 144 return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG, 145 145 val); 146 - default: 146 + case ad7992: 147 + case ad7993: 148 + case ad7994: 147 149 return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG, 148 150 val); 151 + default: 152 + /* Will be written when doing a conversion */ 153 + st->config = val; 154 + return 0; 149 155 } 150 156 } 151 157 ··· 161 155 case ad7997: 162 156 case ad7998: 163 157 return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG); 164 - default: 158 + case ad7992: 159 + case ad7993: 160 + case ad7994: 165 161 return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG); 162 + default: 163 + /* No readback support */ 164 + return st->config; 166 165 } 167 166 } 168 167
+3
drivers/iio/inkern.c
··· 449 449 if (val2 == NULL) 450 450 val2 = &unused; 451 451 452 + if(!iio_channel_has_info(chan->channel, info)) 453 + return -EINVAL; 454 + 452 455 if (chan->indio_dev->info->read_raw_multi) { 453 456 ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev, 454 457 chan->channel, INDIO_MAX_RAW_ELEMENTS,
+44 -16
drivers/input/evdev.c
··· 28 28 #include <linux/cdev.h> 29 29 #include "input-compat.h" 30 30 31 + enum evdev_clock_type { 32 + EV_CLK_REAL = 0, 33 + EV_CLK_MONO, 34 + EV_CLK_BOOT, 35 + EV_CLK_MAX 36 + }; 37 + 31 38 struct evdev { 32 39 int open; 33 40 struct input_handle handle; ··· 56 49 struct fasync_struct *fasync; 57 50 struct evdev *evdev; 58 51 struct list_head node; 59 - int clkid; 52 + int clk_type; 60 53 bool revoked; 61 54 unsigned int bufsize; 62 55 struct input_event buffer[]; 63 56 }; 57 + 58 + static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid) 59 + { 60 + switch (clkid) { 61 + 62 + case CLOCK_REALTIME: 63 + client->clk_type = EV_CLK_REAL; 64 + break; 65 + case CLOCK_MONOTONIC: 66 + client->clk_type = EV_CLK_MONO; 67 + break; 68 + case CLOCK_BOOTTIME: 69 + client->clk_type = EV_CLK_BOOT; 70 + break; 71 + default: 72 + return -EINVAL; 73 + } 74 + 75 + return 0; 76 + } 64 77 65 78 /* flush queued events of type @type, caller must hold client->buffer_lock */ 66 79 static void __evdev_flush_queue(struct evdev_client *client, unsigned int type) ··· 135 108 struct input_event ev; 136 109 ktime_t time; 137 110 138 - time = (client->clkid == CLOCK_MONOTONIC) ? 139 - ktime_get() : ktime_get_real(); 111 + time = client->clk_type == EV_CLK_REAL ? 112 + ktime_get_real() : 113 + client->clk_type == EV_CLK_MONO ? 114 + ktime_get() : 115 + ktime_get_boottime(); 140 116 141 117 ev.time = ktime_to_timeval(time); 142 118 ev.type = EV_SYN; ··· 189 159 190 160 static void evdev_pass_values(struct evdev_client *client, 191 161 const struct input_value *vals, unsigned int count, 192 - ktime_t mono, ktime_t real) 162 + ktime_t *ev_time) 193 163 { 194 164 struct evdev *evdev = client->evdev; 195 165 const struct input_value *v; ··· 199 169 if (client->revoked) 200 170 return; 201 171 202 - event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ? 
203 - mono : real); 172 + event.time = ktime_to_timeval(ev_time[client->clk_type]); 204 173 205 174 /* Interrupts are disabled, just acquire the lock. */ 206 175 spin_lock(&client->buffer_lock); ··· 227 198 { 228 199 struct evdev *evdev = handle->private; 229 200 struct evdev_client *client; 230 - ktime_t time_mono, time_real; 201 + ktime_t ev_time[EV_CLK_MAX]; 231 202 232 - time_mono = ktime_get(); 233 - time_real = ktime_mono_to_real(time_mono); 203 + ev_time[EV_CLK_MONO] = ktime_get(); 204 + ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]); 205 + ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO], 206 + TK_OFFS_BOOT); 234 207 235 208 rcu_read_lock(); 236 209 237 210 client = rcu_dereference(evdev->grab); 238 211 239 212 if (client) 240 - evdev_pass_values(client, vals, count, time_mono, time_real); 213 + evdev_pass_values(client, vals, count, ev_time); 241 214 else 242 215 list_for_each_entry_rcu(client, &evdev->client_list, node) 243 - evdev_pass_values(client, vals, count, 244 - time_mono, time_real); 216 + evdev_pass_values(client, vals, count, ev_time); 245 217 246 218 rcu_read_unlock(); 247 219 } ··· 907 877 case EVIOCSCLOCKID: 908 878 if (copy_from_user(&i, p, sizeof(unsigned int))) 909 879 return -EFAULT; 910 - if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME) 911 - return -EINVAL; 912 - client->clkid = i; 913 - return 0; 880 + 881 + return evdev_set_clk_type(client, i); 914 882 915 883 case EVIOCGKEYCODE: 916 884 return evdev_handle_get_keycode(dev, p);
+13 -9
drivers/input/input.c
··· 1974 1974 1975 1975 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ 1976 1976 1977 - for (i = 0; i < ABS_CNT; i++) { 1978 - if (test_bit(i, dev->absbit)) { 1979 - if (input_is_mt_axis(i)) 1980 - events += mt_slots; 1981 - else 1982 - events++; 1977 + if (test_bit(EV_ABS, dev->evbit)) { 1978 + for (i = 0; i < ABS_CNT; i++) { 1979 + if (test_bit(i, dev->absbit)) { 1980 + if (input_is_mt_axis(i)) 1981 + events += mt_slots; 1982 + else 1983 + events++; 1984 + } 1983 1985 } 1984 1986 } 1985 1987 1986 - for (i = 0; i < REL_CNT; i++) 1987 - if (test_bit(i, dev->relbit)) 1988 - events++; 1988 + if (test_bit(EV_REL, dev->evbit)) { 1989 + for (i = 0; i < REL_CNT; i++) 1990 + if (test_bit(i, dev->relbit)) 1991 + events++; 1992 + } 1989 1993 1990 1994 /* Make room for KEY and MSC events */ 1991 1995 events += 7;
+1
drivers/input/keyboard/Kconfig
··· 559 559 config KEYBOARD_STMPE 560 560 tristate "STMPE keypad support" 561 561 depends on MFD_STMPE 562 + depends on OF 562 563 select INPUT_MATRIXKMAP 563 564 help 564 565 Say Y here if you want to use the keypad controller on STMPE I/O
+57 -57
drivers/input/keyboard/gpio_keys.c
··· 35 35 struct gpio_button_data { 36 36 const struct gpio_keys_button *button; 37 37 struct input_dev *input; 38 - struct timer_list timer; 39 - struct work_struct work; 40 - unsigned int timer_debounce; /* in msecs */ 38 + 39 + struct timer_list release_timer; 40 + unsigned int release_delay; /* in msecs, for IRQ-only buttons */ 41 + 42 + struct delayed_work work; 43 + unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */ 44 + 41 45 unsigned int irq; 42 46 spinlock_t lock; 43 47 bool disabled; ··· 120 116 { 121 117 if (!bdata->disabled) { 122 118 /* 123 - * Disable IRQ and possible debouncing timer. 119 + * Disable IRQ and associated timer/work structure. 124 120 */ 125 121 disable_irq(bdata->irq); 126 - if (bdata->timer_debounce) 127 - del_timer_sync(&bdata->timer); 122 + 123 + if (gpio_is_valid(bdata->button->gpio)) 124 + cancel_delayed_work_sync(&bdata->work); 125 + else 126 + del_timer_sync(&bdata->release_timer); 128 127 129 128 bdata->disabled = true; 130 129 } ··· 350 343 static void gpio_keys_gpio_work_func(struct work_struct *work) 351 344 { 352 345 struct gpio_button_data *bdata = 353 - container_of(work, struct gpio_button_data, work); 346 + container_of(work, struct gpio_button_data, work.work); 354 347 355 348 gpio_keys_gpio_report_event(bdata); 356 349 357 350 if (bdata->button->wakeup) 358 351 pm_relax(bdata->input->dev.parent); 359 - } 360 - 361 - static void gpio_keys_gpio_timer(unsigned long _data) 362 - { 363 - struct gpio_button_data *bdata = (struct gpio_button_data *)_data; 364 - 365 - schedule_work(&bdata->work); 366 352 } 367 353 368 354 static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id) ··· 366 366 367 367 if (bdata->button->wakeup) 368 368 pm_stay_awake(bdata->input->dev.parent); 369 - if (bdata->timer_debounce) 370 - mod_timer(&bdata->timer, 371 - jiffies + msecs_to_jiffies(bdata->timer_debounce)); 372 - else 373 - schedule_work(&bdata->work); 369 + 370 + mod_delayed_work(system_wq, 371 + &bdata->work, 372 + 
msecs_to_jiffies(bdata->software_debounce)); 374 373 375 374 return IRQ_HANDLED; 376 375 } ··· 407 408 input_event(input, EV_KEY, button->code, 1); 408 409 input_sync(input); 409 410 410 - if (!bdata->timer_debounce) { 411 + if (!bdata->release_delay) { 411 412 input_event(input, EV_KEY, button->code, 0); 412 413 input_sync(input); 413 414 goto out; ··· 416 417 bdata->key_pressed = true; 417 418 } 418 419 419 - if (bdata->timer_debounce) 420 - mod_timer(&bdata->timer, 421 - jiffies + msecs_to_jiffies(bdata->timer_debounce)); 420 + if (bdata->release_delay) 421 + mod_timer(&bdata->release_timer, 422 + jiffies + msecs_to_jiffies(bdata->release_delay)); 422 423 out: 423 424 spin_unlock_irqrestore(&bdata->lock, flags); 424 425 return IRQ_HANDLED; ··· 428 429 { 429 430 struct gpio_button_data *bdata = data; 430 431 431 - if (bdata->timer_debounce) 432 - del_timer_sync(&bdata->timer); 433 - 434 - cancel_work_sync(&bdata->work); 432 + if (gpio_is_valid(bdata->button->gpio)) 433 + cancel_delayed_work_sync(&bdata->work); 434 + else 435 + del_timer_sync(&bdata->release_timer); 435 436 } 436 437 437 438 static int gpio_keys_setup_key(struct platform_device *pdev, ··· 465 466 button->debounce_interval * 1000); 466 467 /* use timer if gpiolib doesn't provide debounce */ 467 468 if (error < 0) 468 - bdata->timer_debounce = 469 + bdata->software_debounce = 469 470 button->debounce_interval; 470 471 } 471 472 472 - irq = gpio_to_irq(button->gpio); 473 - if (irq < 0) { 474 - error = irq; 475 - dev_err(dev, 476 - "Unable to get irq number for GPIO %d, error %d\n", 477 - button->gpio, error); 478 - return error; 473 + if (button->irq) { 474 + bdata->irq = button->irq; 475 + } else { 476 + irq = gpio_to_irq(button->gpio); 477 + if (irq < 0) { 478 + error = irq; 479 + dev_err(dev, 480 + "Unable to get irq number for GPIO %d, error %d\n", 481 + button->gpio, error); 482 + return error; 483 + } 484 + bdata->irq = irq; 479 485 } 480 - bdata->irq = irq; 481 486 482 - 
INIT_WORK(&bdata->work, gpio_keys_gpio_work_func); 483 - setup_timer(&bdata->timer, 484 - gpio_keys_gpio_timer, (unsigned long)bdata); 487 + INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func); 485 488 486 489 isr = gpio_keys_gpio_isr; 487 490 irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; ··· 500 499 return -EINVAL; 501 500 } 502 501 503 - bdata->timer_debounce = button->debounce_interval; 504 - setup_timer(&bdata->timer, 502 + bdata->release_delay = button->debounce_interval; 503 + setup_timer(&bdata->release_timer, 505 504 gpio_keys_irq_timer, (unsigned long)bdata); 506 505 507 506 isr = gpio_keys_irq_isr; ··· 511 510 input_set_capability(input, button->type ?: EV_KEY, button->code); 512 511 513 512 /* 514 - * Install custom action to cancel debounce timer and 513 + * Install custom action to cancel release timer and 515 514 * workqueue item. 516 515 */ 517 516 error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata); ··· 619 618 620 619 i = 0; 621 620 for_each_child_of_node(node, pp) { 622 - int gpio = -1; 623 621 enum of_gpio_flags flags; 624 622 625 623 button = &pdata->buttons[i++]; 626 624 627 - if (!of_find_property(pp, "gpios", NULL)) { 628 - button->irq = irq_of_parse_and_map(pp, 0); 629 - if (button->irq == 0) { 630 - i--; 631 - pdata->nbuttons--; 632 - dev_warn(dev, "Found button without gpios or irqs\n"); 633 - continue; 634 - } 635 - } else { 636 - gpio = of_get_gpio_flags(pp, 0, &flags); 637 - if (gpio < 0) { 638 - error = gpio; 625 + button->gpio = of_get_gpio_flags(pp, 0, &flags); 626 + if (button->gpio < 0) { 627 + error = button->gpio; 628 + if (error != -ENOENT) { 639 629 if (error != -EPROBE_DEFER) 640 630 dev_err(dev, 641 631 "Failed to get gpio flags, error: %d\n", 642 632 error); 643 633 return ERR_PTR(error); 644 634 } 635 + } else { 636 + button->active_low = flags & OF_GPIO_ACTIVE_LOW; 645 637 } 646 638 647 - button->gpio = gpio; 648 - button->active_low = flags & OF_GPIO_ACTIVE_LOW; 639 + button->irq = 
irq_of_parse_and_map(pp, 0); 640 + 641 + if (!gpio_is_valid(button->gpio) && !button->irq) { 642 + dev_err(dev, "Found button without gpios or irqs\n"); 643 + return ERR_PTR(-EINVAL); 644 + } 649 645 650 646 if (of_property_read_u32(pp, "linux,code", &button->code)) { 651 647 dev_err(dev, "Button without keycode: 0x%x\n", ··· 656 658 button->type = EV_KEY; 657 659 658 660 button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL); 661 + 662 + button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL); 659 663 660 664 if (of_property_read_u32(pp, "debounce-interval", 661 665 &button->debounce_interval))
+3 -3
drivers/input/keyboard/hil_kbd.c
··· 473 473 if (error) 474 474 goto bail1; 475 475 476 - init_completion(&dev->cmd_done); 476 + reinit_completion(&dev->cmd_done); 477 477 serio_write(serio, 0); 478 478 serio_write(serio, 0); 479 479 serio_write(serio, HIL_PKT_CMD >> 8); ··· 482 482 if (error) 483 483 goto bail1; 484 484 485 - init_completion(&dev->cmd_done); 485 + reinit_completion(&dev->cmd_done); 486 486 serio_write(serio, 0); 487 487 serio_write(serio, 0); 488 488 serio_write(serio, HIL_PKT_CMD >> 8); ··· 491 491 if (error) 492 492 goto bail1; 493 493 494 - init_completion(&dev->cmd_done); 494 + reinit_completion(&dev->cmd_done); 495 495 serio_write(serio, 0); 496 496 serio_write(serio, 0); 497 497 serio_write(serio, HIL_PKT_CMD >> 8);
+80 -61
drivers/input/keyboard/stmpe-keypad.c
··· 45 45 #define STMPE_KEYPAD_MAX_ROWS 8 46 46 #define STMPE_KEYPAD_MAX_COLS 8 47 47 #define STMPE_KEYPAD_ROW_SHIFT 3 48 - #define STMPE_KEYPAD_KEYMAP_SIZE \ 48 + #define STMPE_KEYPAD_KEYMAP_MAX_SIZE \ 49 49 (STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS) 50 50 51 51 /** 52 52 * struct stmpe_keypad_variant - model-specific attributes 53 53 * @auto_increment: whether the KPC_DATA_BYTE register address 54 54 * auto-increments on multiple read 55 + * @set_pullup: whether the pins need to have their pull-ups set 55 56 * @num_data: number of data bytes 56 57 * @num_normal_data: number of normal keys' data bytes 57 58 * @max_cols: maximum number of columns supported ··· 62 61 */ 63 62 struct stmpe_keypad_variant { 64 63 bool auto_increment; 64 + bool set_pullup; 65 65 int num_data; 66 66 int num_normal_data; 67 67 int max_cols; ··· 83 81 }, 84 82 [STMPE2401] = { 85 83 .auto_increment = false, 84 + .set_pullup = true, 86 85 .num_data = 3, 87 86 .num_normal_data = 2, 88 87 .max_cols = 8, ··· 93 90 }, 94 91 [STMPE2403] = { 95 92 .auto_increment = true, 93 + .set_pullup = true, 96 94 .num_data = 5, 97 95 .num_normal_data = 3, 98 96 .max_cols = 8, ··· 103 99 }, 104 100 }; 105 101 102 + /** 103 + * struct stmpe_keypad - STMPE keypad state container 104 + * @stmpe: pointer to parent STMPE device 105 + * @input: spawned input device 106 + * @variant: STMPE variant 107 + * @debounce_ms: debounce interval, in ms. Maximum is 108 + * %STMPE_KEYPAD_MAX_DEBOUNCE. 109 + * @scan_count: number of key scanning cycles to confirm key data. 110 + * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT. 
111 + * @no_autorepeat: disable key autorepeat 112 + * @rows: bitmask for the rows 113 + * @cols: bitmask for the columns 114 + * @keymap: the keymap 115 + */ 106 116 struct stmpe_keypad { 107 117 struct stmpe *stmpe; 108 118 struct input_dev *input; 109 119 const struct stmpe_keypad_variant *variant; 110 - const struct stmpe_keypad_platform_data *plat; 111 - 120 + unsigned int debounce_ms; 121 + unsigned int scan_count; 122 + bool no_autorepeat; 112 123 unsigned int rows; 113 124 unsigned int cols; 114 - 115 - unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE]; 125 + unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE]; 116 126 }; 117 127 118 128 static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data) ··· 189 171 unsigned int col_gpios = variant->col_gpios; 190 172 unsigned int row_gpios = variant->row_gpios; 191 173 struct stmpe *stmpe = keypad->stmpe; 174 + u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB]; 192 175 unsigned int pins = 0; 176 + unsigned int pu_pins = 0; 177 + int ret; 193 178 int i; 194 179 195 180 /* ··· 209 188 for (i = 0; i < variant->max_cols; i++) { 210 189 int num = __ffs(col_gpios); 211 190 212 - if (keypad->cols & (1 << i)) 191 + if (keypad->cols & (1 << i)) { 213 192 pins |= 1 << num; 193 + pu_pins |= 1 << num; 194 + } 214 195 215 196 col_gpios &= ~(1 << num); 216 197 } ··· 226 203 row_gpios &= ~(1 << num); 227 204 } 228 205 229 - return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD); 206 + ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD); 207 + if (ret) 208 + return ret; 209 + 210 + /* 211 + * On STMPE24xx, set pin bias to pull-up on all keypad input 212 + * pins (columns), this incidentally happen to be maximum 8 pins 213 + * and placed at GPIO0-7 so only the LSB of the pull up register 214 + * ever needs to be written. 
215 + */ 216 + if (variant->set_pullup) { 217 + u8 val; 218 + 219 + ret = stmpe_reg_read(stmpe, pureg); 220 + if (ret) 221 + return ret; 222 + 223 + /* Do not touch unused pins, may be used for GPIO */ 224 + val = ret & ~pu_pins; 225 + val |= pu_pins; 226 + 227 + ret = stmpe_reg_write(stmpe, pureg, val); 228 + } 229 + 230 + return 0; 230 231 } 231 232 232 233 static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad) 233 234 { 234 - const struct stmpe_keypad_platform_data *plat = keypad->plat; 235 235 const struct stmpe_keypad_variant *variant = keypad->variant; 236 236 struct stmpe *stmpe = keypad->stmpe; 237 237 int ret; 238 238 239 - if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE) 239 + if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE) 240 240 return -EINVAL; 241 241 242 - if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT) 242 + if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT) 243 243 return -EINVAL; 244 244 245 245 ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD); ··· 291 245 292 246 ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB, 293 247 STMPE_KPC_CTRL_MSB_SCAN_COUNT, 294 - plat->scan_count << 4); 248 + keypad->scan_count << 4); 295 249 if (ret < 0) 296 250 return ret; 297 251 ··· 299 253 STMPE_KPC_CTRL_LSB_SCAN | 300 254 STMPE_KPC_CTRL_LSB_DEBOUNCE, 301 255 STMPE_KPC_CTRL_LSB_SCAN | 302 - (plat->debounce_ms << 1)); 256 + (keypad->debounce_ms << 1)); 303 257 } 304 258 305 - static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad) 259 + static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad, 260 + u32 used_rows, u32 used_cols) 306 261 { 307 262 int row, col; 308 263 309 - for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) { 310 - for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) { 264 + for (row = 0; row < used_rows; row++) { 265 + for (col = 0; col < used_cols; col++) { 311 266 int code = MATRIX_SCAN_CODE(row, col, 312 - STMPE_KEYPAD_ROW_SHIFT); 267 + STMPE_KEYPAD_ROW_SHIFT); 313 268 if (keypad->keymap[code] != 
KEY_RESERVED) { 314 269 keypad->rows |= 1 << row; 315 270 keypad->cols |= 1 << col; ··· 319 272 } 320 273 } 321 274 322 - #ifdef CONFIG_OF 323 - static const struct stmpe_keypad_platform_data * 324 - stmpe_keypad_of_probe(struct device *dev) 325 - { 326 - struct device_node *np = dev->of_node; 327 - struct stmpe_keypad_platform_data *plat; 328 - 329 - if (!np) 330 - return ERR_PTR(-ENODEV); 331 - 332 - plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL); 333 - if (!plat) 334 - return ERR_PTR(-ENOMEM); 335 - 336 - of_property_read_u32(np, "debounce-interval", &plat->debounce_ms); 337 - of_property_read_u32(np, "st,scan-count", &plat->scan_count); 338 - 339 - plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat"); 340 - 341 - return plat; 342 - } 343 - #else 344 - static inline const struct stmpe_keypad_platform_data * 345 - stmpe_keypad_of_probe(struct device *dev) 346 - { 347 - return ERR_PTR(-EINVAL); 348 - } 349 - #endif 350 - 351 275 static int stmpe_keypad_probe(struct platform_device *pdev) 352 276 { 353 277 struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent); 354 - const struct stmpe_keypad_platform_data *plat; 278 + struct device_node *np = pdev->dev.of_node; 355 279 struct stmpe_keypad *keypad; 356 280 struct input_dev *input; 281 + u32 rows; 282 + u32 cols; 357 283 int error; 358 284 int irq; 359 - 360 - plat = stmpe->pdata->keypad; 361 - if (!plat) { 362 - plat = stmpe_keypad_of_probe(&pdev->dev); 363 - if (IS_ERR(plat)) 364 - return PTR_ERR(plat); 365 - } 366 285 367 286 irq = platform_get_irq(pdev, 0); 368 287 if (irq < 0) ··· 339 326 if (!keypad) 340 327 return -ENOMEM; 341 328 329 + keypad->stmpe = stmpe; 330 + keypad->variant = &stmpe_keypad_variants[stmpe->partnum]; 331 + 332 + of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms); 333 + of_property_read_u32(np, "st,scan-count", &keypad->scan_count); 334 + keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat"); 335 + 342 336 input = 
devm_input_allocate_device(&pdev->dev); 343 337 if (!input) 344 338 return -ENOMEM; ··· 354 334 input->id.bustype = BUS_I2C; 355 335 input->dev.parent = &pdev->dev; 356 336 357 - error = matrix_keypad_build_keymap(plat->keymap_data, NULL, 358 - STMPE_KEYPAD_MAX_ROWS, 359 - STMPE_KEYPAD_MAX_COLS, 337 + error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols); 338 + if (error) 339 + return error; 340 + 341 + error = matrix_keypad_build_keymap(NULL, NULL, rows, cols, 360 342 keypad->keymap, input); 361 343 if (error) 362 344 return error; 363 345 364 346 input_set_capability(input, EV_MSC, MSC_SCAN); 365 - if (!plat->no_autorepeat) 347 + if (!keypad->no_autorepeat) 366 348 __set_bit(EV_REP, input->evbit); 367 349 368 - stmpe_keypad_fill_used_pins(keypad); 350 + stmpe_keypad_fill_used_pins(keypad, rows, cols); 369 351 370 - keypad->stmpe = stmpe; 371 - keypad->plat = plat; 372 352 keypad->input = input; 373 - keypad->variant = &stmpe_keypad_variants[stmpe->partnum]; 374 353 375 354 error = stmpe_keypad_chip_init(keypad); 376 355 if (error < 0)
+74 -10
drivers/input/mouse/alps.c
··· 881 881 unsigned char *pkt, 882 882 unsigned char pkt_id) 883 883 { 884 + /* 885 + * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0 886 + * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0 887 + * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0 888 + * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3 889 + * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5 890 + * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0 891 + * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4 892 + * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1 893 + * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0 894 + * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4 895 + * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0 896 + * L: Left button 897 + * R / M: Non-clickpads: Right / Middle button 898 + * Clickpads: When > 2 fingers are down, and some fingers 899 + * are in the button area, then the 2 coordinates reported 900 + * are for fingers outside the button area and these report 901 + * extra fingers being present in the right / left button 902 + * area. Note these fingers are not added to the F field! 903 + * so if a TWO packet is received and R = 1 then there are 904 + * 3 fingers down, etc. 905 + * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt 906 + * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt 907 + * otherwise byte 0 bit 4 must be set and byte 0/4/5 are 908 + * in NEW fmt 909 + * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ... 
910 + */ 911 + 884 912 mt[0].x = ((pkt[2] & 0x80) << 4); 885 913 mt[0].x |= ((pkt[2] & 0x3F) << 5); 886 914 mt[0].x |= ((pkt[3] & 0x30) >> 1); ··· 947 919 948 920 static int alps_get_mt_count(struct input_mt_pos *mt) 949 921 { 950 - int i; 922 + int i, fingers = 0; 951 923 952 - for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++) 953 - /* empty */; 924 + for (i = 0; i < MAX_TOUCHES; i++) { 925 + if (mt[i].x != 0 || mt[i].y != 0) 926 + fingers++; 927 + } 954 928 955 - return i; 929 + return fingers; 956 930 } 957 931 958 932 static int alps_decode_packet_v7(struct alps_fields *f, 959 933 unsigned char *p, 960 934 struct psmouse *psmouse) 961 935 { 936 + struct alps_data *priv = psmouse->private; 962 937 unsigned char pkt_id; 963 938 964 939 pkt_id = alps_get_packet_id_v7(p); ··· 969 938 return 0; 970 939 if (pkt_id == V7_PACKET_ID_UNKNOWN) 971 940 return -1; 941 + /* 942 + * NEW packets are send to indicate a discontinuity in the finger 943 + * coordinate reporting. Specifically a finger may have moved from 944 + * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for 945 + * us. 946 + * 947 + * NEW packets have 3 problems: 948 + * 1) They do not contain middle / right button info (on non clickpads) 949 + * this can be worked around by preserving the old button state 950 + * 2) They do not contain an accurate fingercount, and they are 951 + * typically send when the number of fingers changes. We cannot use 952 + * the old finger count as that may mismatch with the amount of 953 + * touch coordinates we've available in the NEW packet 954 + * 3) Their x data for the second touch is inaccurate leading to 955 + * a possible jump of the x coordinate by 16 units when the first 956 + * non NEW packet comes in 957 + * Since problems 2 & 3 cannot be worked around, just ignore them. 
958 + */ 959 + if (pkt_id == V7_PACKET_ID_NEW) 960 + return 1; 972 961 973 962 alps_get_finger_coordinate_v7(f->mt, p, pkt_id); 974 963 975 - if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) { 976 - f->left = (p[0] & 0x80) >> 7; 964 + if (pkt_id == V7_PACKET_ID_TWO) 965 + f->fingers = alps_get_mt_count(f->mt); 966 + else /* pkt_id == V7_PACKET_ID_MULTI */ 967 + f->fingers = 3 + (p[5] & 0x03); 968 + 969 + f->left = (p[0] & 0x80) >> 7; 970 + if (priv->flags & ALPS_BUTTONPAD) { 971 + if (p[0] & 0x20) 972 + f->fingers++; 973 + if (p[0] & 0x10) 974 + f->fingers++; 975 + } else { 977 976 f->right = (p[0] & 0x20) >> 5; 978 977 f->middle = (p[0] & 0x10) >> 4; 979 978 } 980 979 981 - if (pkt_id == V7_PACKET_ID_TWO) 982 - f->fingers = alps_get_mt_count(f->mt); 983 - else if (pkt_id == V7_PACKET_ID_MULTI) 984 - f->fingers = 3 + (p[5] & 0x03); 980 + /* Sometimes a single touch is reported in mt[1] rather then mt[0] */ 981 + if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) { 982 + f->mt[0].x = f->mt[1].x; 983 + f->mt[0].y = f->mt[1].y; 984 + f->mt[1].x = 0; 985 + f->mt[1].y = 0; 986 + } 985 987 986 988 return 0; 987 989 }
+4
drivers/input/mouse/trackpoint.c
··· 227 227 TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH); 228 228 TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME); 229 229 TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV); 230 + TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME); 230 231 231 232 TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0, 232 233 TP_DEF_PTSON); ··· 247 246 &psmouse_attr_upthresh.dattr.attr, 248 247 &psmouse_attr_ztime.dattr.attr, 249 248 &psmouse_attr_jenks.dattr.attr, 249 + &psmouse_attr_drift_time.dattr.attr, 250 250 &psmouse_attr_press_to_select.dattr.attr, 251 251 &psmouse_attr_skipback.dattr.attr, 252 252 &psmouse_attr_ext_dev.dattr.attr, ··· 314 312 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh); 315 313 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime); 316 314 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks); 315 + TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time); 317 316 318 317 /* toggles */ 319 318 TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select); ··· 335 332 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh); 336 333 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime); 337 334 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks); 335 + TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time); 338 336 TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia); 339 337 340 338 /* toggles */
+5
drivers/input/mouse/trackpoint.h
··· 70 70 #define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */ 71 71 #define TP_Z_TIME 0x5E /* How sharp of a press */ 72 72 #define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */ 73 + #define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */ 74 + /* must last (x*107ms) for drift */ 75 + /* correction to occur */ 73 76 74 77 /* 75 78 * Toggling Flag bits ··· 123 120 #define TP_DEF_UP_THRESH 0xFF 124 121 #define TP_DEF_Z_TIME 0x26 125 122 #define TP_DEF_JENKS_CURV 0x87 123 + #define TP_DEF_DRIFT_TIME 0x05 126 124 127 125 /* Toggles */ 128 126 #define TP_DEF_MB 0x00 ··· 141 137 unsigned char draghys, mindrag; 142 138 unsigned char thresh, upthresh; 143 139 unsigned char ztime, jenks; 140 + unsigned char drift_time; 144 141 145 142 /* toggles */ 146 143 unsigned char press_to_select;
+26 -73
drivers/input/touchscreen/atmel_mxt_ts.c
··· 99 99 #define MXT_T6_STATUS_COMSERR (1 << 2) 100 100 101 101 /* MXT_GEN_POWER_T7 field */ 102 - struct t7_config { 103 - u8 idle; 104 - u8 active; 105 - } __packed; 106 - 107 - #define MXT_POWER_CFG_RUN 0 108 - #define MXT_POWER_CFG_DEEPSLEEP 1 102 + #define MXT_POWER_IDLEACQINT 0 103 + #define MXT_POWER_ACTVACQINT 1 104 + #define MXT_POWER_ACTV2IDLETO 2 109 105 110 106 /* MXT_GEN_ACQUIRE_T8 field */ 111 107 #define MXT_ACQUIRE_CHRGTIME 0 ··· 113 117 #define MXT_ACQUIRE_ATCHCALSTHR 7 114 118 115 119 /* MXT_TOUCH_MULTI_T9 field */ 120 + #define MXT_TOUCH_CTRL 0 116 121 #define MXT_T9_ORIENT 9 117 122 #define MXT_T9_RANGE 18 118 123 ··· 253 256 bool update_input; 254 257 u8 last_message_count; 255 258 u8 num_touchids; 256 - struct t7_config t7_cfg; 257 259 258 260 /* Cached parameters from object table */ 259 261 u16 T5_address; ··· 666 670 667 671 /* Save current status */ 668 672 data->t6_status = status; 673 + } 674 + 675 + static int mxt_write_object(struct mxt_data *data, 676 + u8 type, u8 offset, u8 val) 677 + { 678 + struct mxt_object *object; 679 + u16 reg; 680 + 681 + object = mxt_get_object(data, type); 682 + if (!object || offset >= mxt_obj_size(object)) 683 + return -EINVAL; 684 + 685 + reg = object->start_address; 686 + return mxt_write_reg(data->client, reg + offset, val); 669 687 } 670 688 671 689 static void mxt_input_button(struct mxt_data *data, u8 *message) ··· 1752 1742 return error; 1753 1743 } 1754 1744 1755 - static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) 1756 - { 1757 - struct device *dev = &data->client->dev; 1758 - int error; 1759 - struct t7_config *new_config; 1760 - struct t7_config deepsleep = { .active = 0, .idle = 0 }; 1761 - 1762 - if (sleep == MXT_POWER_CFG_DEEPSLEEP) 1763 - new_config = &deepsleep; 1764 - else 1765 - new_config = &data->t7_cfg; 1766 - 1767 - error = __mxt_write_reg(data->client, data->T7_address, 1768 - sizeof(data->t7_cfg), new_config); 1769 - if (error) 1770 - return error; 1771 - 1772 - 
dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n", 1773 - new_config->active, new_config->idle); 1774 - 1775 - return 0; 1776 - } 1777 - 1778 - static int mxt_init_t7_power_cfg(struct mxt_data *data) 1779 - { 1780 - struct device *dev = &data->client->dev; 1781 - int error; 1782 - bool retry = false; 1783 - 1784 - recheck: 1785 - error = __mxt_read_reg(data->client, data->T7_address, 1786 - sizeof(data->t7_cfg), &data->t7_cfg); 1787 - if (error) 1788 - return error; 1789 - 1790 - if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) { 1791 - if (!retry) { 1792 - dev_dbg(dev, "T7 cfg zero, resetting\n"); 1793 - mxt_soft_reset(data); 1794 - retry = true; 1795 - goto recheck; 1796 - } else { 1797 - dev_dbg(dev, "T7 cfg zero after reset, overriding\n"); 1798 - data->t7_cfg.active = 20; 1799 - data->t7_cfg.idle = 100; 1800 - return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN); 1801 - } 1802 - } 1803 - 1804 - dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n", 1805 - data->t7_cfg.active, data->t7_cfg.idle); 1806 - return 0; 1807 - } 1808 - 1809 1745 static int mxt_configure_objects(struct mxt_data *data, 1810 1746 const struct firmware *cfg) 1811 1747 { ··· 1763 1807 error = mxt_update_cfg(data, cfg); 1764 1808 if (error) 1765 1809 dev_warn(dev, "Error %d updating config\n", error); 1766 - } 1767 - 1768 - error = mxt_init_t7_power_cfg(data); 1769 - if (error) { 1770 - dev_err(dev, "Failed to initialize power cfg\n"); 1771 - return error; 1772 1810 } 1773 1811 1774 1812 error = mxt_initialize_t9_input_device(data); ··· 2043 2093 2044 2094 static void mxt_start(struct mxt_data *data) 2045 2095 { 2046 - mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN); 2047 - 2048 - /* Recalibrate since chip has been in deep sleep */ 2049 - mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false); 2096 + /* Touch enable */ 2097 + mxt_write_object(data, 2098 + MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83); 2050 2099 } 2051 2100 2052 2101 static void mxt_stop(struct mxt_data *data) 2053 2102 { 2054 - 
mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP); 2103 + /* Touch disable */ 2104 + mxt_write_object(data, 2105 + MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0); 2055 2106 } 2056 2107 2057 2108 static int mxt_input_open(struct input_dev *dev) ··· 2216 2265 struct i2c_client *client = to_i2c_client(dev); 2217 2266 struct mxt_data *data = i2c_get_clientdata(client); 2218 2267 struct input_dev *input_dev = data->input_dev; 2268 + 2269 + mxt_soft_reset(data); 2219 2270 2220 2271 mutex_lock(&input_dev->mutex); 2221 2272
+3 -1
drivers/input/touchscreen/edt-ft5x06.c
··· 850 850 } 851 851 852 852 #define EDT_ATTR_CHECKSET(name, reg) \ 853 + do { \ 853 854 if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \ 854 855 pdata->name <= edt_ft5x06_attr_##name.limit_high) \ 855 - edt_ft5x06_register_write(tsdata, reg, pdata->name) 856 + edt_ft5x06_register_write(tsdata, reg, pdata->name); \ 857 + } while (0) 856 858 857 859 #define EDT_GET_PROP(name, reg) { \ 858 860 u32 val; \
+4 -8
drivers/iommu/intel-iommu.c
··· 4029 4029 if (action != BUS_NOTIFY_REMOVED_DEVICE) 4030 4030 return 0; 4031 4031 4032 - /* 4033 - * If the device is still attached to a device driver we can't 4034 - * tear down the domain yet as DMA mappings may still be in use. 4035 - * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that. 4036 - */ 4037 - if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL) 4038 - return 0; 4039 - 4040 4032 domain = find_domain(dev); 4041 4033 if (!domain) 4042 4034 return 0; ··· 4420 4428 domain_remove_one_dev_info(old_domain, dev); 4421 4429 else 4422 4430 domain_remove_dev_info(old_domain); 4431 + 4432 + if (!domain_type_is_vm_or_si(old_domain) && 4433 + list_empty(&old_domain->devices)) 4434 + domain_exit(old_domain); 4423 4435 } 4424 4436 } 4425 4437
+3 -3
drivers/iommu/ipmmu-vmsa.c
··· 558 558 559 559 static u64 ipmmu_page_prot(unsigned int prot, u64 type) 560 560 { 561 - u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF 561 + u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF 562 562 | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV 563 563 | ARM_VMSA_PTE_NS | type; 564 564 ··· 568 568 if (prot & IOMMU_CACHE) 569 569 pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; 570 570 571 - if (prot & IOMMU_EXEC) 572 - pgprot &= ~ARM_VMSA_PTE_XN; 571 + if (prot & IOMMU_NOEXEC) 572 + pgprot |= ARM_VMSA_PTE_XN; 573 573 else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) 574 574 /* If no access create a faulting entry to avoid TLB fills. */ 575 575 pgprot &= ~ARM_VMSA_PTE_PAGE;
-1
drivers/iommu/rockchip-iommu.c
··· 1009 1009 .remove = rk_iommu_remove, 1010 1010 .driver = { 1011 1011 .name = "rk_iommu", 1012 - .owner = THIS_MODULE, 1013 1012 .of_match_table = of_match_ptr(rk_iommu_dt_ids), 1014 1013 }, 1015 1014 };
+1 -1
drivers/isdn/hardware/eicon/message.c
··· 4880 4880 byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ 4881 4881 byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; 4882 4882 byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; 4883 - byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; 4883 + byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00"; 4884 4884 byte force_mt_info = false; 4885 4885 byte dir; 4886 4886 dword d;
+6 -6
drivers/leds/leds-netxbig.c
··· 330 330 led_dat->sata = 0; 331 331 led_dat->cdev.brightness = LED_OFF; 332 332 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 333 - /* 334 - * If available, expose the SATA activity blink capability through 335 - * a "sata" sysfs attribute. 336 - */ 337 - if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 338 - led_dat->cdev.groups = netxbig_led_groups; 339 333 led_dat->mode_addr = template->mode_addr; 340 334 led_dat->mode_val = template->mode_val; 341 335 led_dat->bright_addr = template->bright_addr; 342 336 led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1; 343 337 led_dat->timer = pdata->timer; 344 338 led_dat->num_timer = pdata->num_timer; 339 + /* 340 + * If available, expose the SATA activity blink capability through 341 + * a "sata" sysfs attribute. 342 + */ 343 + if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) 344 + led_dat->cdev.groups = netxbig_led_groups; 345 345 346 346 return led_classdev_register(&pdev->dev, &led_dat->cdev); 347 347 }
+1
drivers/mcb/mcb-internal.h
··· 7 7 #define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45 8 8 #define CHAMELEON_FILENAME_LEN 12 9 9 #define CHAMELEONV2_MAGIC 0xabce 10 + #define CHAM_HEADER_SIZE 0x200 10 11 11 12 enum chameleon_descriptor_type { 12 13 CHAMELEON_DTYPE_GENERAL = 0x0,
+18 -9
drivers/mcb/mcb-pci.c
··· 17 17 18 18 struct priv { 19 19 struct mcb_bus *bus; 20 + phys_addr_t mapbase; 20 21 void __iomem *base; 21 22 }; 22 23 ··· 32 31 33 32 static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 34 33 { 34 + struct resource *res; 35 35 struct priv *priv; 36 - phys_addr_t mapbase; 37 36 int ret; 38 37 int num_cells; 39 38 unsigned long flags; ··· 48 47 return -ENODEV; 49 48 } 50 49 51 - mapbase = pci_resource_start(pdev, 0); 52 - if (!mapbase) { 50 + priv->mapbase = pci_resource_start(pdev, 0); 51 + if (!priv->mapbase) { 53 52 dev_err(&pdev->dev, "No PCI resource\n"); 54 53 goto err_start; 55 54 } 56 55 57 - ret = pci_request_region(pdev, 0, KBUILD_MODNAME); 58 - if (ret) { 59 - dev_err(&pdev->dev, "Failed to request PCI BARs\n"); 56 + res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE, 57 + KBUILD_MODNAME); 58 + if (IS_ERR(res)) { 59 + dev_err(&pdev->dev, "Failed to request PCI memory\n"); 60 + ret = PTR_ERR(res); 60 61 goto err_start; 61 62 } 62 63 63 - priv->base = pci_iomap(pdev, 0, 0); 64 + priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE); 64 65 if (!priv->base) { 65 66 dev_err(&pdev->dev, "Cannot ioremap\n"); 66 67 ret = -ENOMEM; ··· 87 84 88 85 priv->bus->get_irq = mcb_pci_get_irq; 89 86 90 - ret = chameleon_parse_cells(priv->bus, mapbase, priv->base); 87 + ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); 91 88 if (ret < 0) 92 89 goto err_drvdata; 93 90 num_cells = ret; ··· 96 93 97 94 mcb_bus_add_devices(priv->bus); 98 95 96 + return 0; 97 + 99 98 err_drvdata: 100 - pci_iounmap(pdev, priv->base); 99 + iounmap(priv->base); 101 100 err_ioremap: 102 101 pci_release_region(pdev, 0); 103 102 err_start: ··· 112 107 struct priv *priv = pci_get_drvdata(pdev); 113 108 114 109 mcb_release_bus(priv->bus); 110 + 111 + iounmap(priv->base); 112 + release_region(priv->mapbase, CHAM_HEADER_SIZE); 113 + pci_disable_device(pdev); 115 114 } 116 115 117 116 static const struct pci_device_id mcb_pci_tbl[] = {
+4
drivers/mfd/stmpe.c
··· 519 519 [STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB, 520 520 [STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB, 521 521 [STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB, 522 + [STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB, 522 523 [STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB, 523 524 [STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB, 524 525 [STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB, ··· 668 667 [STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW, 669 668 [STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW, 670 669 [STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW, 670 + [STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW, 671 671 [STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW, 672 672 [STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW, 673 673 }; ··· 752 750 [STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB, 753 751 [STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB, 754 752 [STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB, 753 + [STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB, 754 + [STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB, 755 755 [STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB, 756 756 [STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB, 757 757 [STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
+3
drivers/mfd/stmpe.h
··· 188 188 #define STMPE1601_REG_GPIO_ED_MSB 0x8A 189 189 #define STMPE1601_REG_GPIO_RE_LSB 0x8D 190 190 #define STMPE1601_REG_GPIO_FE_LSB 0x8F 191 + #define STMPE1601_REG_GPIO_PU_LSB 0x91 191 192 #define STMPE1601_REG_GPIO_AF_U_MSB 0x92 192 193 193 194 #define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3) ··· 277 276 #define STMPE24XX_REG_GPEDR_MSB 0x8C 278 277 #define STMPE24XX_REG_GPRER_LSB 0x91 279 278 #define STMPE24XX_REG_GPFER_LSB 0x94 279 + #define STMPE24XX_REG_GPPUR_LSB 0x97 280 + #define STMPE24XX_REG_GPPDR_LSB 0x9a 280 281 #define STMPE24XX_REG_GPAFR_U_MSB 0x9B 281 282 282 283 #define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
+63 -19
drivers/misc/cxl/context.c
··· 100 100 return 0; 101 101 } 102 102 103 + static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 104 + { 105 + struct cxl_context *ctx = vma->vm_file->private_data; 106 + unsigned long address = (unsigned long)vmf->virtual_address; 107 + u64 area, offset; 108 + 109 + offset = vmf->pgoff << PAGE_SHIFT; 110 + 111 + pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", 112 + __func__, ctx->pe, address, offset); 113 + 114 + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { 115 + area = ctx->afu->psn_phys; 116 + if (offset > ctx->afu->adapter->ps_size) 117 + return VM_FAULT_SIGBUS; 118 + } else { 119 + area = ctx->psn_phys; 120 + if (offset > ctx->psn_size) 121 + return VM_FAULT_SIGBUS; 122 + } 123 + 124 + mutex_lock(&ctx->status_mutex); 125 + 126 + if (ctx->status != STARTED) { 127 + mutex_unlock(&ctx->status_mutex); 128 + pr_devel("%s: Context not started, failing problem state access\n", __func__); 129 + return VM_FAULT_SIGBUS; 130 + } 131 + 132 + vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); 133 + 134 + mutex_unlock(&ctx->status_mutex); 135 + 136 + return VM_FAULT_NOPAGE; 137 + } 138 + 139 + static const struct vm_operations_struct cxl_mmap_vmops = { 140 + .fault = cxl_mmap_fault, 141 + }; 142 + 103 143 /* 104 144 * Map a per-context mmio space into the given vma. 
105 145 */ ··· 148 108 u64 len = vma->vm_end - vma->vm_start; 149 109 len = min(len, ctx->psn_size); 150 110 151 - if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { 152 - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 153 - return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size); 154 - } 111 + if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { 112 + /* make sure there is a valid per process space for this AFU */ 113 + if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { 114 + pr_devel("AFU doesn't support mmio space\n"); 115 + return -EINVAL; 116 + } 155 117 156 - /* make sure there is a valid per process space for this AFU */ 157 - if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { 158 - pr_devel("AFU doesn't support mmio space\n"); 159 - return -EINVAL; 118 + /* Can't mmap until the AFU is enabled */ 119 + if (!ctx->afu->enabled) 120 + return -EBUSY; 160 121 } 161 - 162 - /* Can't mmap until the AFU is enabled */ 163 - if (!ctx->afu->enabled) 164 - return -EBUSY; 165 122 166 123 pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, 167 124 ctx->psn_phys, ctx->pe , ctx->master); 168 125 126 + vma->vm_flags |= VM_IO | VM_PFNMAP; 169 127 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 170 - return vm_iomap_memory(vma, ctx->psn_phys, len); 128 + vma->vm_ops = &cxl_mmap_vmops; 129 + return 0; 171 130 } 172 131 173 132 /* ··· 189 150 afu_release_irqs(ctx); 190 151 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 191 152 wake_up_all(&ctx->wq); 192 - 193 - /* Release Problem State Area mapping */ 194 - mutex_lock(&ctx->mapping_lock); 195 - if (ctx->mapping) 196 - unmap_mapping_range(ctx->mapping, 0, 0, 1); 197 - mutex_unlock(&ctx->mapping_lock); 198 153 } 199 154 200 155 /* ··· 217 184 * created and torn down after the IDR removed 218 185 */ 219 186 __detach_context(ctx); 187 + 188 + /* 189 + * We are force detaching - remove any active PSA mappings so 190 + * userspace cannot 
interfere with the card if it comes back. 191 + * Easiest way to exercise this is to unbind and rebind the 192 + * driver via sysfs while it is in use. 193 + */ 194 + mutex_lock(&ctx->mapping_lock); 195 + if (ctx->mapping) 196 + unmap_mapping_range(ctx->mapping, 0, 0, 1); 197 + mutex_unlock(&ctx->mapping_lock); 220 198 } 221 199 mutex_unlock(&afu->contexts_lock); 222 200 }
+8 -6
drivers/misc/cxl/file.c
··· 140 140 141 141 pr_devel("%s: pe: %i\n", __func__, ctx->pe); 142 142 143 - mutex_lock(&ctx->status_mutex); 144 - if (ctx->status != OPENED) { 145 - rc = -EIO; 146 - goto out; 147 - } 148 - 143 + /* Do this outside the status_mutex to avoid a circular dependency with 144 + * the locking in cxl_mmap_fault() */ 149 145 if (copy_from_user(&work, uwork, 150 146 sizeof(struct cxl_ioctl_start_work))) { 151 147 rc = -EFAULT; 148 + goto out; 149 + } 150 + 151 + mutex_lock(&ctx->status_mutex); 152 + if (ctx->status != OPENED) { 153 + rc = -EIO; 152 154 goto out; 153 155 } 154 156
+12
drivers/misc/mei/hw-me.c
··· 234 234 struct mei_me_hw *hw = to_me_hw(dev); 235 235 u32 hcsr = mei_hcsr_read(hw); 236 236 237 + /* H_RST may be found lit before reset is started, 238 + * for example if preceding reset flow hasn't completed. 239 + * In that case asserting H_RST will be ignored, therefore 240 + * we need to clean H_RST bit to start a successful reset sequence. 241 + */ 242 + if ((hcsr & H_RST) == H_RST) { 243 + dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); 244 + hcsr &= ~H_RST; 245 + mei_me_reg_write(hw, H_CSR, hcsr); 246 + hcsr = mei_hcsr_read(hw); 247 + } 248 + 237 249 hcsr |= H_RST | H_IG | H_IS; 238 250 239 251 if (intr_enable)
+1 -1
drivers/mmc/core/mmc.c
··· 886 886 unsigned idx, bus_width = 0; 887 887 int err = 0; 888 888 889 - if (!mmc_can_ext_csd(card) && 889 + if (!mmc_can_ext_csd(card) || 890 890 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) 891 891 return 0; 892 892
+2
drivers/mmc/host/sdhci-acpi.c
··· 247 247 { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, 248 248 { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, 249 249 { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, 250 + { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio }, 250 251 { "PNP0D40" }, 251 252 { }, 252 253 }; ··· 258 257 { "INT33BB" }, 259 258 { "INT33C6" }, 260 259 { "INT3436" }, 260 + { "INT344D" }, 261 261 { "PNP0D40" }, 262 262 { }, 263 263 };
+25
drivers/mmc/host/sdhci-pci.c
··· 993 993 .subdevice = PCI_ANY_ID, 994 994 .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, 995 995 }, 996 + 997 + { 998 + .vendor = PCI_VENDOR_ID_INTEL, 999 + .device = PCI_DEVICE_ID_INTEL_SPT_EMMC, 1000 + .subvendor = PCI_ANY_ID, 1001 + .subdevice = PCI_ANY_ID, 1002 + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, 1003 + }, 1004 + 1005 + { 1006 + .vendor = PCI_VENDOR_ID_INTEL, 1007 + .device = PCI_DEVICE_ID_INTEL_SPT_SDIO, 1008 + .subvendor = PCI_ANY_ID, 1009 + .subdevice = PCI_ANY_ID, 1010 + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, 1011 + }, 1012 + 1013 + { 1014 + .vendor = PCI_VENDOR_ID_INTEL, 1015 + .device = PCI_DEVICE_ID_INTEL_SPT_SD, 1016 + .subvendor = PCI_ANY_ID, 1017 + .subdevice = PCI_ANY_ID, 1018 + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, 1019 + }, 1020 + 996 1021 { 997 1022 .vendor = PCI_VENDOR_ID_O2, 998 1023 .device = PCI_DEVICE_ID_O2_8120,
+3
drivers/mmc/host/sdhci-pci.h
··· 21 21 #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 22 22 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 23 23 #define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 24 + #define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b 25 + #define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c 26 + #define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d 24 27 25 28 /* 26 29 * PCI registers
+7 -8
drivers/mmc/host/sdhci-pxav3.c
··· 300 300 if (IS_ERR(host)) 301 301 return PTR_ERR(host); 302 302 303 - if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { 304 - ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); 305 - if (ret < 0) 306 - goto err_mbus_win; 307 - } 308 - 309 - 310 303 pltfm_host = sdhci_priv(host); 311 304 pltfm_host->priv = pxa; 312 305 ··· 317 324 pxa->clk_core = devm_clk_get(dev, "core"); 318 325 if (!IS_ERR(pxa->clk_core)) 319 326 clk_prepare_enable(pxa->clk_core); 327 + 328 + if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { 329 + ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); 330 + if (ret < 0) 331 + goto err_mbus_win; 332 + } 320 333 321 334 /* enable 1/8V DDR capable */ 322 335 host->mmc->caps |= MMC_CAP_1_8V_DDR; ··· 395 396 pm_runtime_disable(&pdev->dev); 396 397 err_of_parse: 397 398 err_cd_req: 399 + err_mbus_win: 398 400 clk_disable_unprepare(pxa->clk_io); 399 401 if (!IS_ERR(pxa->clk_core)) 400 402 clk_disable_unprepare(pxa->clk_core); 401 403 err_clk_get: 402 - err_mbus_win: 403 404 sdhci_pltfm_free(pdev); 404 405 return ret; 405 406 }
+54 -26
drivers/mmc/host/sdhci.c
··· 259 259 260 260 del_timer_sync(&host->tuning_timer); 261 261 host->flags &= ~SDHCI_NEEDS_RETUNING; 262 - host->mmc->max_blk_count = 263 - (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 264 262 } 265 263 sdhci_enable_card_detection(host); 266 264 } ··· 1271 1273 spin_unlock_irq(&host->lock); 1272 1274 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 1273 1275 spin_lock_irq(&host->lock); 1276 + 1277 + if (mode != MMC_POWER_OFF) 1278 + sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 1279 + else 1280 + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 1281 + 1274 1282 return; 1275 1283 } 1276 1284 ··· 1357 1353 1358 1354 sdhci_runtime_pm_get(host); 1359 1355 1356 + present = mmc_gpio_get_cd(host->mmc); 1357 + 1360 1358 spin_lock_irqsave(&host->lock, flags); 1361 1359 1362 1360 WARN_ON(host->mrq != NULL); ··· 1387 1381 * zero: cd-gpio is used, and card is removed 1388 1382 * one: cd-gpio is used, and card is present 1389 1383 */ 1390 - present = mmc_gpio_get_cd(host->mmc); 1391 1384 if (present < 0) { 1392 1385 /* If polling, assume that the card is always present. 
*/ 1393 1386 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ··· 1885 1880 return !(present_state & SDHCI_DATA_LVL_MASK); 1886 1881 } 1887 1882 1883 + static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 1884 + { 1885 + struct sdhci_host *host = mmc_priv(mmc); 1886 + unsigned long flags; 1887 + 1888 + spin_lock_irqsave(&host->lock, flags); 1889 + host->flags |= SDHCI_HS400_TUNING; 1890 + spin_unlock_irqrestore(&host->lock, flags); 1891 + 1892 + return 0; 1893 + } 1894 + 1888 1895 static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 1889 1896 { 1890 1897 struct sdhci_host *host = mmc_priv(mmc); ··· 1904 1887 int tuning_loop_counter = MAX_TUNING_LOOP; 1905 1888 int err = 0; 1906 1889 unsigned long flags; 1890 + unsigned int tuning_count = 0; 1891 + bool hs400_tuning; 1907 1892 1908 1893 sdhci_runtime_pm_get(host); 1909 1894 spin_lock_irqsave(&host->lock, flags); 1895 + 1896 + hs400_tuning = host->flags & SDHCI_HS400_TUNING; 1897 + host->flags &= ~SDHCI_HS400_TUNING; 1898 + 1899 + if (host->tuning_mode == SDHCI_TUNING_MODE_1) 1900 + tuning_count = host->tuning_count; 1910 1901 1911 1902 /* 1912 1903 * The Host Controller needs tuning only in case of SDR104 mode ··· 1924 1899 * tuning function has to be executed. 1925 1900 */ 1926 1901 switch (host->timing) { 1902 + /* HS400 tuning is done in HS200 mode */ 1927 1903 case MMC_TIMING_MMC_HS400: 1904 + err = -EINVAL; 1905 + goto out_unlock; 1906 + 1928 1907 case MMC_TIMING_MMC_HS200: 1908 + /* 1909 + * Periodic re-tuning for HS400 is not expected to be needed, so 1910 + * disable it here. 
1911 + */ 1912 + if (hs400_tuning) 1913 + tuning_count = 0; 1914 + break; 1915 + 1929 1916 case MMC_TIMING_UHS_SDR104: 1930 1917 break; 1931 1918 ··· 1948 1911 /* FALLTHROUGH */ 1949 1912 1950 1913 default: 1951 - spin_unlock_irqrestore(&host->lock, flags); 1952 - sdhci_runtime_pm_put(host); 1953 - return 0; 1914 + goto out_unlock; 1954 1915 } 1955 1916 1956 1917 if (host->ops->platform_execute_tuning) { ··· 2072 2037 } 2073 2038 2074 2039 out: 2075 - /* 2076 - * If this is the very first time we are here, we start the retuning 2077 - * timer. Since only during the first time, SDHCI_NEEDS_RETUNING 2078 - * flag won't be set, we check this condition before actually starting 2079 - * the timer. 2080 - */ 2081 - if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && 2082 - (host->tuning_mode == SDHCI_TUNING_MODE_1)) { 2040 + host->flags &= ~SDHCI_NEEDS_RETUNING; 2041 + 2042 + if (tuning_count) { 2083 2043 host->flags |= SDHCI_USING_RETUNING_TIMER; 2084 - mod_timer(&host->tuning_timer, jiffies + 2085 - host->tuning_count * HZ); 2086 - /* Tuning mode 1 limits the maximum data length to 4MB */ 2087 - mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; 2088 - } else if (host->flags & SDHCI_USING_RETUNING_TIMER) { 2089 - host->flags &= ~SDHCI_NEEDS_RETUNING; 2090 - /* Reload the new initial value for timer */ 2091 - mod_timer(&host->tuning_timer, jiffies + 2092 - host->tuning_count * HZ); 2044 + mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); 2093 2045 } 2094 2046 2095 2047 /* ··· 2092 2070 2093 2071 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2094 2072 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2073 + out_unlock: 2095 2074 spin_unlock_irqrestore(&host->lock, flags); 2096 2075 sdhci_runtime_pm_put(host); 2097 2076 ··· 2133 2110 { 2134 2111 struct sdhci_host *host = mmc_priv(mmc); 2135 2112 unsigned long flags; 2113 + int present; 2136 2114 2137 2115 /* First check if client has provided their own card event */ 2138 2116 if 
(host->ops->card_event) 2139 2117 host->ops->card_event(host); 2140 2118 2119 + present = sdhci_do_get_cd(host); 2120 + 2141 2121 spin_lock_irqsave(&host->lock, flags); 2142 2122 2143 2123 /* Check host->mrq first in case we are runtime suspended */ 2144 - if (host->mrq && !sdhci_do_get_cd(host)) { 2124 + if (host->mrq && !present) { 2145 2125 pr_err("%s: Card removed during transfer!\n", 2146 2126 mmc_hostname(host->mmc)); 2147 2127 pr_err("%s: Resetting controller.\n", ··· 2168 2142 .hw_reset = sdhci_hw_reset, 2169 2143 .enable_sdio_irq = sdhci_enable_sdio_irq, 2170 2144 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 2145 + .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 2171 2146 .execute_tuning = sdhci_execute_tuning, 2172 2147 .card_event = sdhci_card_event, 2173 2148 .card_busy = sdhci_card_busy, ··· 3287 3260 mmc->max_segs = SDHCI_MAX_SEGS; 3288 3261 3289 3262 /* 3290 - * Maximum number of sectors in one transfer. Limited by DMA boundary 3291 - * size (512KiB). 3263 + * Maximum number of sectors in one transfer. Limited by SDMA boundary 3264 + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 3265 + * is less anyway. 3292 3266 */ 3293 3267 mmc->max_req_size = 524288; 3294 3268
+1 -1
drivers/net/bonding/bond_main.c
··· 1648 1648 /* slave is not a slave or master is not master of this slave */ 1649 1649 if (!(slave_dev->flags & IFF_SLAVE) || 1650 1650 !netdev_has_upper_dev(slave_dev, bond_dev)) { 1651 - netdev_err(bond_dev, "cannot release %s\n", 1651 + netdev_dbg(bond_dev, "cannot release %s\n", 1652 1652 slave_dev->name); 1653 1653 return -EINVAL; 1654 1654 }
-2
drivers/net/caif/caif_virtio.c
··· 257 257 struct vringh_kiov *riov = &cfv->ctx.riov; 258 258 unsigned int skb_len; 259 259 260 - again: 261 260 do { 262 261 skb = NULL; 263 262 ··· 321 322 napi_schedule_prep(napi)) { 322 323 vringh_notify_disable_kern(cfv->vr_rx); 323 324 __napi_schedule(napi); 324 - goto again; 325 325 } 326 326 break; 327 327
+4 -2
drivers/net/ethernet/8390/ne2k-pci.c
··· 246 246 247 247 if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { 248 248 dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); 249 - return -ENODEV; 249 + goto err_out; 250 250 } 251 251 252 252 if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { 253 253 dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", 254 254 NE_IO_EXTENT, ioaddr); 255 - return -EBUSY; 255 + goto err_out; 256 256 } 257 257 258 258 reg0 = inb(ioaddr); ··· 392 392 free_netdev (dev); 393 393 err_out_free_res: 394 394 release_region (ioaddr, NE_IO_EXTENT); 395 + err_out: 396 + pci_disable_device(pdev); 395 397 return -ENODEV; 396 398 } 397 399
-12
drivers/net/ethernet/Kconfig
··· 156 156 source "drivers/net/ethernet/renesas/Kconfig" 157 157 source "drivers/net/ethernet/rdc/Kconfig" 158 158 source "drivers/net/ethernet/rocker/Kconfig" 159 - 160 - config S6GMAC 161 - tristate "S6105 GMAC ethernet support" 162 - depends on XTENSA_VARIANT_S6000 163 - select PHYLIB 164 - ---help--- 165 - This driver supports the on chip ethernet device on the 166 - S6105 xtensa processor. 167 - 168 - To compile this driver as a module, choose M here. The module 169 - will be called s6gmac. 170 - 171 159 source "drivers/net/ethernet/samsung/Kconfig" 172 160 source "drivers/net/ethernet/seeq/Kconfig" 173 161 source "drivers/net/ethernet/silan/Kconfig"
-1
drivers/net/ethernet/Makefile
··· 66 66 obj-$(CONFIG_SH_ETH) += renesas/ 67 67 obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ 68 68 obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ 69 - obj-$(CONFIG_S6GMAC) += s6gmac.o 70 69 obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ 71 70 obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ 72 71 obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
+3 -1
drivers/net/ethernet/allwinner/sun4i-emac.c
··· 850 850 } 851 851 852 852 db->clk = devm_clk_get(&pdev->dev, NULL); 853 - if (IS_ERR(db->clk)) 853 + if (IS_ERR(db->clk)) { 854 + ret = PTR_ERR(db->clk); 854 855 goto out; 856 + } 855 857 856 858 clk_prepare_enable(db->clk); 857 859
+6 -9
drivers/net/ethernet/altera/altera_tse_main.c
··· 1170 1170 init_error: 1171 1171 free_skbufs(dev); 1172 1172 alloc_skbuf_error: 1173 - if (priv->phydev) { 1174 - phy_disconnect(priv->phydev); 1175 - priv->phydev = NULL; 1176 - } 1177 1173 phy_error: 1178 1174 return ret; 1179 1175 } ··· 1182 1186 int ret; 1183 1187 unsigned long int flags; 1184 1188 1185 - /* Stop and disconnect the PHY */ 1186 - if (priv->phydev) { 1189 + /* Stop the PHY */ 1190 + if (priv->phydev) 1187 1191 phy_stop(priv->phydev); 1188 - phy_disconnect(priv->phydev); 1189 - priv->phydev = NULL; 1190 - } 1191 1192 1192 1193 netif_stop_queue(dev); 1193 1194 napi_disable(&priv->napi); ··· 1518 1525 static int altera_tse_remove(struct platform_device *pdev) 1519 1526 { 1520 1527 struct net_device *ndev = platform_get_drvdata(pdev); 1528 + struct altera_tse_private *priv = netdev_priv(ndev); 1529 + 1530 + if (priv->phydev) 1531 + phy_disconnect(priv->phydev); 1521 1532 1522 1533 platform_set_drvdata(pdev, NULL); 1523 1534 altera_tse_mdio_destroy(ndev);
+13 -11
drivers/net/ethernet/atheros/alx/main.c
··· 184 184 schedule_work(&alx->reset_wk); 185 185 } 186 186 187 - static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) 187 + static int alx_clean_rx_irq(struct alx_priv *alx, int budget) 188 188 { 189 189 struct alx_rx_queue *rxq = &alx->rxq; 190 190 struct alx_rrd *rrd; 191 191 struct alx_buffer *rxb; 192 192 struct sk_buff *skb; 193 193 u16 length, rfd_cleaned = 0; 194 + int work = 0; 194 195 195 - while (budget > 0) { 196 + while (work < budget) { 196 197 rrd = &rxq->rrd[rxq->rrd_read_idx]; 197 198 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) 198 199 break; ··· 204 203 ALX_GET_FIELD(le32_to_cpu(rrd->word0), 205 204 RRD_NOR) != 1) { 206 205 alx_schedule_reset(alx); 207 - return 0; 206 + return work; 208 207 } 209 208 210 209 rxb = &rxq->bufs[rxq->read_idx]; ··· 244 243 } 245 244 246 245 napi_gro_receive(&alx->napi, skb); 247 - budget--; 246 + work++; 248 247 249 248 next_pkt: 250 249 if (++rxq->read_idx == alx->rx_ringsz) ··· 259 258 if (rfd_cleaned) 260 259 alx_refill_rx_ring(alx, GFP_ATOMIC); 261 260 262 - return budget > 0; 261 + return work; 263 262 } 264 263 265 264 static int alx_poll(struct napi_struct *napi, int budget) 266 265 { 267 266 struct alx_priv *alx = container_of(napi, struct alx_priv, napi); 268 267 struct alx_hw *hw = &alx->hw; 269 - bool complete = true; 270 268 unsigned long flags; 269 + bool tx_complete; 270 + int work; 271 271 272 - complete = alx_clean_tx_irq(alx) && 273 - alx_clean_rx_irq(alx, budget); 272 + tx_complete = alx_clean_tx_irq(alx); 273 + work = alx_clean_rx_irq(alx, budget); 274 274 275 - if (!complete) 276 - return 1; 275 + if (!tx_complete || work == budget) 276 + return budget; 277 277 278 278 napi_complete(&alx->napi); 279 279 ··· 286 284 287 285 alx_post_write(hw); 288 286 289 - return 0; 287 + return work; 290 288 } 291 289 292 290 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+5 -3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 12553 12553 return 0; 12554 12554 } 12555 12555 12556 - static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev) 12556 + static netdev_features_t bnx2x_features_check(struct sk_buff *skb, 12557 + struct net_device *dev, 12558 + netdev_features_t features) 12557 12559 { 12558 - return vxlan_gso_check(skb); 12560 + return vxlan_features_check(skb, features); 12559 12561 } 12560 12562 12561 12563 static const struct net_device_ops bnx2x_netdev_ops = { ··· 12591 12589 #endif 12592 12590 .ndo_get_phys_port_id = bnx2x_get_phys_port_id, 12593 12591 .ndo_set_vf_link_state = bnx2x_set_vf_link_state, 12594 - .ndo_gso_check = bnx2x_gso_check, 12592 + .ndo_features_check = bnx2x_features_check, 12595 12593 }; 12596 12594 12597 12595 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
+37 -20
drivers/net/ethernet/broadcom/tg3.c
··· 7413 7413 } 7414 7414 7415 7415 static void tg3_irq_quiesce(struct tg3 *tp) 7416 + __releases(tp->lock) 7417 + __acquires(tp->lock) 7416 7418 { 7417 7419 int i; 7418 7420 ··· 7423 7421 tp->irq_sync = 1; 7424 7422 smp_mb(); 7425 7423 7424 + spin_unlock_bh(&tp->lock); 7425 + 7426 7426 for (i = 0; i < tp->irq_cnt; i++) 7427 7427 synchronize_irq(tp->napi[i].irq_vec); 7428 + 7429 + spin_lock_bh(&tp->lock); 7428 7430 } 7429 7431 7430 7432 /* Fully shutdown all tg3 driver activity elsewhere in the system. ··· 9024 9018 9025 9019 /* tp->lock is held. */ 9026 9020 static int tg3_chip_reset(struct tg3 *tp) 9021 + __releases(tp->lock) 9022 + __acquires(tp->lock) 9027 9023 { 9028 9024 u32 val; 9029 9025 void (*write_op)(struct tg3 *, u32, u32); ··· 9081 9073 } 9082 9074 smp_mb(); 9083 9075 9076 + tg3_full_unlock(tp); 9077 + 9084 9078 for (i = 0; i < tp->irq_cnt; i++) 9085 9079 synchronize_irq(tp->napi[i].irq_vec); 9080 + 9081 + tg3_full_lock(tp, 0); 9086 9082 9087 9083 if (tg3_asic_rev(tp) == ASIC_REV_57780) { 9088 9084 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; ··· 10915 10903 { 10916 10904 struct tg3 *tp = (struct tg3 *) __opaque; 10917 10905 10918 - if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) 10919 - goto restart_timer; 10920 - 10921 10906 spin_lock(&tp->lock); 10907 + 10908 + if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { 10909 + spin_unlock(&tp->lock); 10910 + goto restart_timer; 10911 + } 10922 10912 10923 10913 if (tg3_asic_rev(tp) == ASIC_REV_5717 || 10924 10914 tg3_flag(tp, 57765_CLASS)) ··· 11115 11101 struct tg3 *tp = container_of(work, struct tg3, reset_task); 11116 11102 int err; 11117 11103 11104 + rtnl_lock(); 11118 11105 tg3_full_lock(tp, 0); 11119 11106 11120 11107 if (!netif_running(tp->dev)) { 11121 11108 tg3_flag_clear(tp, RESET_TASK_PENDING); 11122 11109 tg3_full_unlock(tp); 11110 + rtnl_unlock(); 11123 11111 return; 11124 11112 } 11125 11113 ··· 11154 11138 tg3_phy_start(tp); 11155 11139 11156 11140 
tg3_flag_clear(tp, RESET_TASK_PENDING); 11141 + rtnl_unlock(); 11157 11142 } 11158 11143 11159 11144 static int tg3_request_irq(struct tg3 *tp, int irq_num) ··· 17817 17800 goto err_out_apeunmap; 17818 17801 } 17819 17802 17820 - /* 17821 - * Reset chip in case UNDI or EFI driver did not shutdown 17822 - * DMA self test will enable WDMAC and we'll see (spurious) 17823 - * pending DMA on the PCI bus at that point. 17824 - */ 17825 - if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || 17826 - (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 17827 - tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 17828 - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 17829 - } 17830 - 17831 - err = tg3_test_dma(tp); 17832 - if (err) { 17833 - dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); 17834 - goto err_out_apeunmap; 17835 - } 17836 - 17837 17803 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 17838 17804 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 17839 17805 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; ··· 17859 17859 sndmbx -= 0x4; 17860 17860 else 17861 17861 sndmbx += 0xc; 17862 + } 17863 + 17864 + /* 17865 + * Reset chip in case UNDI or EFI driver did not shutdown 17866 + * DMA self test will enable WDMAC and we'll see (spurious) 17867 + * pending DMA on the PCI bus at that point. 17868 + */ 17869 + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || 17870 + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 17871 + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 17872 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 17873 + } 17874 + 17875 + err = tg3_test_dma(tp); 17876 + if (err) { 17877 + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); 17878 + goto err_out_apeunmap; 17862 17879 } 17863 17880 17864 17881 tg3_init_coal(tp);
+1 -1
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
··· 172 172 173 173 /* Retrieve flash partition info */ 174 174 fcomp.comp_status = 0; 175 - init_completion(&fcomp.comp); 175 + reinit_completion(&fcomp.comp); 176 176 spin_lock_irqsave(&bnad->bna_lock, flags); 177 177 ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, 178 178 bnad_cb_completion, &fcomp);
+5 -5
drivers/net/ethernet/cadence/at91_ether.c
··· 340 340 res = PTR_ERR(lp->pclk); 341 341 goto err_free_dev; 342 342 } 343 - clk_enable(lp->pclk); 343 + clk_prepare_enable(lp->pclk); 344 344 345 345 lp->hclk = ERR_PTR(-ENOENT); 346 346 lp->tx_clk = ERR_PTR(-ENOENT); ··· 406 406 err_out_unregister_netdev: 407 407 unregister_netdev(dev); 408 408 err_disable_clock: 409 - clk_disable(lp->pclk); 409 + clk_disable_unprepare(lp->pclk); 410 410 err_free_dev: 411 411 free_netdev(dev); 412 412 return res; ··· 424 424 kfree(lp->mii_bus->irq); 425 425 mdiobus_free(lp->mii_bus); 426 426 unregister_netdev(dev); 427 - clk_disable(lp->pclk); 427 + clk_disable_unprepare(lp->pclk); 428 428 free_netdev(dev); 429 429 430 430 return 0; ··· 440 440 netif_stop_queue(net_dev); 441 441 netif_device_detach(net_dev); 442 442 443 - clk_disable(lp->pclk); 443 + clk_disable_unprepare(lp->pclk); 444 444 } 445 445 return 0; 446 446 } ··· 451 451 struct macb *lp = netdev_priv(net_dev); 452 452 453 453 if (netif_running(net_dev)) { 454 - clk_enable(lp->pclk); 454 + clk_prepare_enable(lp->pclk); 455 455 456 456 netif_device_attach(net_dev); 457 457 netif_start_queue(net_dev);
+4
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
··· 96 96 s16 xact_addr_filt; /* index of our MAC address filter */ 97 97 u16 rss_size; /* size of VI's RSS table slice */ 98 98 u8 pidx; /* index into adapter port[] */ 99 + s8 mdio_addr; 100 + u8 port_type; /* firmware port type */ 101 + u8 mod_type; /* firmware module type */ 99 102 u8 port_id; /* physical port ID */ 100 103 u8 nqsets; /* # of "Queue Sets" */ 101 104 u8 first_qset; /* index of first "Queue Set" */ ··· 525 522 * is "contracted" to provide for the common code. 526 523 */ 527 524 void t4vf_os_link_changed(struct adapter *, int, int); 525 + void t4vf_os_portmod_changed(struct adapter *, int); 528 526 529 527 /* 530 528 * SGE function prototype declarations.
+127 -15
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
··· 44 44 #include <linux/etherdevice.h> 45 45 #include <linux/debugfs.h> 46 46 #include <linux/ethtool.h> 47 + #include <linux/mdio.h> 47 48 48 49 #include "t4vf_common.h" 49 50 #include "t4vf_defs.h" ··· 208 207 netif_carrier_off(dev); 209 208 netdev_info(dev, "link down\n"); 210 209 } 210 + } 211 + 212 + /* 213 + * THe port module type has changed on the indicated "port" (Virtual 214 + * Interface). 215 + */ 216 + void t4vf_os_portmod_changed(struct adapter *adapter, int pidx) 217 + { 218 + static const char * const mod_str[] = { 219 + NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" 220 + }; 221 + const struct net_device *dev = adapter->port[pidx]; 222 + const struct port_info *pi = netdev_priv(dev); 223 + 224 + if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 225 + dev_info(adapter->pdev_dev, "%s: port module unplugged\n", 226 + dev->name); 227 + else if (pi->mod_type < ARRAY_SIZE(mod_str)) 228 + dev_info(adapter->pdev_dev, "%s: %s port module inserted\n", 229 + dev->name, mod_str[pi->mod_type]); 230 + else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 231 + dev_info(adapter->pdev_dev, "%s: unsupported optical port " 232 + "module inserted\n", dev->name); 233 + else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 234 + dev_info(adapter->pdev_dev, "%s: unknown port module inserted," 235 + "forcing TWINAX\n", dev->name); 236 + else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) 237 + dev_info(adapter->pdev_dev, "%s: transceiver module error\n", 238 + dev->name); 239 + else 240 + dev_info(adapter->pdev_dev, "%s: unknown module type %d " 241 + "inserted\n", dev->name, pi->mod_type); 211 242 } 212 243 213 244 /* ··· 1226 1193 * state of the port to which we're linked. 1227 1194 */ 1228 1195 1229 - /* 1230 - * Return current port link settings. 
1231 - */ 1232 - static int cxgb4vf_get_settings(struct net_device *dev, 1233 - struct ethtool_cmd *cmd) 1196 + static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type, 1197 + unsigned int caps) 1234 1198 { 1235 - const struct port_info *pi = netdev_priv(dev); 1199 + unsigned int v = 0; 1236 1200 1237 - cmd->supported = pi->link_cfg.supported; 1238 - cmd->advertising = pi->link_cfg.advertising; 1201 + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || 1202 + type == FW_PORT_TYPE_BT_XAUI) { 1203 + v |= SUPPORTED_TP; 1204 + if (caps & FW_PORT_CAP_SPEED_100M) 1205 + v |= SUPPORTED_100baseT_Full; 1206 + if (caps & FW_PORT_CAP_SPEED_1G) 1207 + v |= SUPPORTED_1000baseT_Full; 1208 + if (caps & FW_PORT_CAP_SPEED_10G) 1209 + v |= SUPPORTED_10000baseT_Full; 1210 + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { 1211 + v |= SUPPORTED_Backplane; 1212 + if (caps & FW_PORT_CAP_SPEED_1G) 1213 + v |= SUPPORTED_1000baseKX_Full; 1214 + if (caps & FW_PORT_CAP_SPEED_10G) 1215 + v |= SUPPORTED_10000baseKX4_Full; 1216 + } else if (type == FW_PORT_TYPE_KR) 1217 + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; 1218 + else if (type == FW_PORT_TYPE_BP_AP) 1219 + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | 1220 + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; 1221 + else if (type == FW_PORT_TYPE_BP4_AP) 1222 + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | 1223 + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | 1224 + SUPPORTED_10000baseKX4_Full; 1225 + else if (type == FW_PORT_TYPE_FIBER_XFI || 1226 + type == FW_PORT_TYPE_FIBER_XAUI || 1227 + type == FW_PORT_TYPE_SFP || 1228 + type == FW_PORT_TYPE_QSFP_10G || 1229 + type == FW_PORT_TYPE_QSA) { 1230 + v |= SUPPORTED_FIBRE; 1231 + if (caps & FW_PORT_CAP_SPEED_1G) 1232 + v |= SUPPORTED_1000baseT_Full; 1233 + if (caps & FW_PORT_CAP_SPEED_10G) 1234 + v |= SUPPORTED_10000baseT_Full; 1235 + } else if (type == FW_PORT_TYPE_BP40_BA || 1236 + type == 
FW_PORT_TYPE_QSFP) { 1237 + v |= SUPPORTED_40000baseSR4_Full; 1238 + v |= SUPPORTED_FIBRE; 1239 + } 1240 + 1241 + if (caps & FW_PORT_CAP_ANEG) 1242 + v |= SUPPORTED_Autoneg; 1243 + return v; 1244 + } 1245 + 1246 + static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1247 + { 1248 + const struct port_info *p = netdev_priv(dev); 1249 + 1250 + if (p->port_type == FW_PORT_TYPE_BT_SGMII || 1251 + p->port_type == FW_PORT_TYPE_BT_XFI || 1252 + p->port_type == FW_PORT_TYPE_BT_XAUI) 1253 + cmd->port = PORT_TP; 1254 + else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || 1255 + p->port_type == FW_PORT_TYPE_FIBER_XAUI) 1256 + cmd->port = PORT_FIBRE; 1257 + else if (p->port_type == FW_PORT_TYPE_SFP || 1258 + p->port_type == FW_PORT_TYPE_QSFP_10G || 1259 + p->port_type == FW_PORT_TYPE_QSA || 1260 + p->port_type == FW_PORT_TYPE_QSFP) { 1261 + if (p->mod_type == FW_PORT_MOD_TYPE_LR || 1262 + p->mod_type == FW_PORT_MOD_TYPE_SR || 1263 + p->mod_type == FW_PORT_MOD_TYPE_ER || 1264 + p->mod_type == FW_PORT_MOD_TYPE_LRM) 1265 + cmd->port = PORT_FIBRE; 1266 + else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || 1267 + p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) 1268 + cmd->port = PORT_DA; 1269 + else 1270 + cmd->port = PORT_OTHER; 1271 + } else 1272 + cmd->port = PORT_OTHER; 1273 + 1274 + if (p->mdio_addr >= 0) { 1275 + cmd->phy_address = p->mdio_addr; 1276 + cmd->transceiver = XCVR_EXTERNAL; 1277 + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? 1278 + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; 1279 + } else { 1280 + cmd->phy_address = 0; /* not really, but no better option */ 1281 + cmd->transceiver = XCVR_INTERNAL; 1282 + cmd->mdio_support = 0; 1283 + } 1284 + 1285 + cmd->supported = t4vf_from_fw_linkcaps(p->port_type, 1286 + p->link_cfg.supported); 1287 + cmd->advertising = t4vf_from_fw_linkcaps(p->port_type, 1288 + p->link_cfg.advertising); 1239 1289 ethtool_cmd_speed_set(cmd, 1240 - netif_carrier_ok(dev) ? 
pi->link_cfg.speed : -1); 1290 + netif_carrier_ok(dev) ? p->link_cfg.speed : 0); 1241 1291 cmd->duplex = DUPLEX_FULL; 1242 - 1243 - cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; 1244 - cmd->phy_address = pi->port_id; 1245 - cmd->transceiver = XCVR_EXTERNAL; 1246 - cmd->autoneg = pi->link_cfg.autoneg; 1292 + cmd->autoneg = p->link_cfg.autoneg; 1247 1293 cmd->maxtxpkt = 0; 1248 1294 cmd->maxrxpkt = 0; 1249 1295 return 0; ··· 2430 2318 */ 2431 2319 n10g = 0; 2432 2320 for_each_port(adapter, pidx) 2433 - n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); 2321 + n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); 2434 2322 2435 2323 /* 2436 2324 * We default to 1 queue per non-10G port and up to # of cores queues
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
··· 230 230 231 231 static inline bool is_10g_port(const struct link_config *lc) 232 232 { 233 - return (lc->supported & SUPPORTED_10000baseT_Full) != 0; 233 + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; 234 234 } 235 235 236 236 static inline bool is_x_10g_port(const struct link_config *lc)
+31 -25
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
··· 245 245 return a & 0x3f; 246 246 } 247 247 248 + #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 249 + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ 250 + FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) 251 + 248 252 /** 249 253 * init_link_config - initialize a link's SW state 250 254 * @lc: structure holding the link state ··· 263 259 lc->requested_speed = 0; 264 260 lc->speed = 0; 265 261 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; 266 - if (lc->supported & SUPPORTED_Autoneg) { 267 - lc->advertising = lc->supported; 262 + if (lc->supported & FW_PORT_CAP_ANEG) { 263 + lc->advertising = lc->supported & ADVERT_MASK; 268 264 lc->autoneg = AUTONEG_ENABLE; 269 265 lc->requested_fc |= PAUSE_AUTONEG; 270 266 } else { ··· 284 280 struct fw_vi_cmd vi_cmd, vi_rpl; 285 281 struct fw_port_cmd port_cmd, port_rpl; 286 282 int v; 287 - u32 word; 288 283 289 284 /* 290 285 * Execute a VI Read command to get our Virtual Interface information ··· 322 319 if (v) 323 320 return v; 324 321 325 - v = 0; 326 - word = be16_to_cpu(port_rpl.u.info.pcap); 327 - if (word & FW_PORT_CAP_SPEED_100M) 328 - v |= SUPPORTED_100baseT_Full; 329 - if (word & FW_PORT_CAP_SPEED_1G) 330 - v |= SUPPORTED_1000baseT_Full; 331 - if (word & FW_PORT_CAP_SPEED_10G) 332 - v |= SUPPORTED_10000baseT_Full; 333 - if (word & FW_PORT_CAP_SPEED_40G) 334 - v |= SUPPORTED_40000baseSR4_Full; 335 - if (word & FW_PORT_CAP_ANEG) 336 - v |= SUPPORTED_Autoneg; 337 - init_link_config(&pi->link_cfg, v); 322 + v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); 323 + pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? 
324 + FW_PORT_CMD_MDIOADDR_G(v) : -1; 325 + pi->port_type = FW_PORT_CMD_PTYPE_G(v); 326 + pi->mod_type = FW_PORT_MOD_TYPE_NA; 327 + 328 + init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap)); 338 329 339 330 return 0; 340 331 } ··· 1488 1491 */ 1489 1492 const struct fw_port_cmd *port_cmd = 1490 1493 (const struct fw_port_cmd *)rpl; 1491 - u32 word; 1494 + u32 stat, mod; 1492 1495 int action, port_id, link_ok, speed, fc, pidx; 1493 1496 1494 1497 /* ··· 1506 1509 port_id = FW_PORT_CMD_PORTID_G( 1507 1510 be32_to_cpu(port_cmd->op_to_portid)); 1508 1511 1509 - word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); 1510 - link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0; 1512 + stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); 1513 + link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; 1511 1514 speed = 0; 1512 1515 fc = 0; 1513 - if (word & FW_PORT_CMD_RXPAUSE_F) 1516 + if (stat & FW_PORT_CMD_RXPAUSE_F) 1514 1517 fc |= PAUSE_RX; 1515 - if (word & FW_PORT_CMD_TXPAUSE_F) 1518 + if (stat & FW_PORT_CMD_TXPAUSE_F) 1516 1519 fc |= PAUSE_TX; 1517 - if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) 1520 + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) 1518 1521 speed = 100; 1519 - else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) 1522 + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) 1520 1523 speed = 1000; 1521 - else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 1524 + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 1522 1525 speed = 10000; 1523 - else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 1526 + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 1524 1527 speed = 40000; 1525 1528 1526 1529 /* ··· 1537 1540 continue; 1538 1541 1539 1542 lc = &pi->link_cfg; 1543 + 1544 + mod = FW_PORT_CMD_MODTYPE_G(stat); 1545 + if (mod != pi->mod_type) { 1546 + pi->mod_type = mod; 1547 + t4vf_os_portmod_changed(adapter, pidx); 1548 + } 1549 + 1540 1550 if (link_ok != lc->link_ok || speed 
!= lc->speed || 1541 1551 fc != lc->fc) { 1542 1552 /* something changed */ 1543 1553 lc->link_ok = link_ok; 1544 1554 lc->speed = speed; 1545 1555 lc->fc = fc; 1556 + lc->supported = 1557 + be16_to_cpu(port_cmd->u.info.pcap); 1546 1558 t4vf_os_link_changed(adapter, pidx, link_ok); 1547 1559 } 1548 1560 }
+12 -6
drivers/net/ethernet/cisco/enic/enic_main.c
··· 1060 1060 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); 1061 1061 } 1062 1062 1063 - if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { 1064 - skb->csum = htons(checksum); 1065 - skb->ip_summed = CHECKSUM_COMPLETE; 1066 - } 1063 + /* Hardware does not provide whole packet checksum. It only 1064 + * provides pseudo checksum. Since hw validates the packet 1065 + * checksum but not provide us the checksum value. use 1066 + * CHECSUM_UNNECESSARY. 1067 + */ 1068 + if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok && 1069 + ipv4_csum_ok) 1070 + skb->ip_summed = CHECKSUM_UNNECESSARY; 1067 1071 1068 1072 if (vlan_stripped) 1069 1073 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); ··· 1616 1612 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1617 1613 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1618 1614 err = -ENOMEM; 1619 - goto err_out_notify_unset; 1615 + goto err_out_free_rq; 1620 1616 } 1621 1617 } 1622 1618 ··· 1649 1645 1650 1646 return 0; 1651 1647 1652 - err_out_notify_unset: 1648 + err_out_free_rq: 1649 + for (i = 0; i < enic->rq_count; i++) 1650 + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); 1653 1651 enic_dev_notify_unset(enic); 1654 1652 err_out_free_intr: 1655 1653 enic_free_intr(enic);
+5 -13
drivers/net/ethernet/dnet.c
··· 398 398 * break out of while loop if there are no more 399 399 * packets waiting 400 400 */ 401 - if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { 402 - napi_complete(napi); 403 - int_enable = dnet_readl(bp, INTR_ENB); 404 - int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; 405 - dnet_writel(bp, int_enable, INTR_ENB); 406 - return 0; 407 - } 401 + if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) 402 + break; 408 403 409 404 cmd_word = dnet_readl(bp, RX_LEN_FIFO); 410 405 pkt_len = cmd_word & 0xFFFF; ··· 428 433 "size %u.\n", dev->name, pkt_len); 429 434 } 430 435 431 - budget -= npackets; 432 - 433 436 if (npackets < budget) { 434 437 /* We processed all packets available. Tell NAPI it can 435 - * stop polling then re-enable rx interrupts */ 438 + * stop polling then re-enable rx interrupts. 439 + */ 436 440 napi_complete(napi); 437 441 int_enable = dnet_readl(bp, INTR_ENB); 438 442 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; 439 443 dnet_writel(bp, int_enable, INTR_ENB); 440 - return 0; 441 444 } 442 445 443 - /* There are still packets waiting */ 444 - return 1; 446 + return npackets; 445 447 } 446 448 447 449 static irqreturn_t dnet_interrupt(int irq, void *dev_id)
+5 -3
drivers/net/ethernet/emulex/benet/be_main.c
··· 4459 4459 adapter->vxlan_port_count--; 4460 4460 } 4461 4461 4462 - static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) 4462 + static netdev_features_t be_features_check(struct sk_buff *skb, 4463 + struct net_device *dev, 4464 + netdev_features_t features) 4463 4465 { 4464 - return vxlan_gso_check(skb); 4466 + return vxlan_features_check(skb, features); 4465 4467 } 4466 4468 #endif 4467 4469 ··· 4494 4492 #ifdef CONFIG_BE2NET_VXLAN 4495 4493 .ndo_add_vxlan_port = be_add_vxlan_port, 4496 4494 .ndo_del_vxlan_port = be_del_vxlan_port, 4497 - .ndo_gso_check = be_gso_check, 4495 + .ndo_features_check = be_features_check, 4498 4496 #endif 4499 4497 }; 4500 4498
+2
drivers/net/ethernet/freescale/fec.h
··· 424 424 * (40ns * 6). 425 425 */ 426 426 #define FEC_QUIRK_BUG_CAPTURE (1 << 10) 427 + /* Controller has only one MDIO bus */ 428 + #define FEC_QUIRK_SINGLE_MDIO (1 << 11) 427 429 428 430 struct fec_enet_priv_tx_q { 429 431 int index;
+6 -4
drivers/net/ethernet/freescale/fec_main.c
··· 91 91 .driver_data = 0, 92 92 }, { 93 93 .name = "imx28-fec", 94 - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 94 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 95 + FEC_QUIRK_SINGLE_MDIO, 95 96 }, { 96 97 .name = "imx6q-fec", 97 98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | ··· 1938 1937 int err = -ENXIO, i; 1939 1938 1940 1939 /* 1941 - * The dual fec interfaces are not equivalent with enet-mac. 1940 + * The i.MX28 dual fec interfaces are not equal. 1942 1941 * Here are the differences: 1943 1942 * 1944 1943 * - fec0 supports MII & RMII modes while fec1 only supports RMII ··· 1953 1952 * mdio interface in board design, and need to be configured by 1954 1953 * fec0 mii_bus. 1955 1954 */ 1956 - if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { 1955 + if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 1957 1956 /* fec1 uses fec0 mii_bus */ 1958 1957 if (mii_cnt && fec0_mii_bus) { 1959 1958 fep->mii_bus = fec0_mii_bus; ··· 2016 2015 mii_cnt++; 2017 2016 2018 2017 /* save fec0 mii_bus */ 2019 - if (fep->quirks & FEC_QUIRK_ENET_MAC) 2018 + if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2020 2019 fec0_mii_bus = fep->mii_bus; 2021 2020 2022 2021 return 0; ··· 3130 3129 pdev->id_entry = of_id->data; 3131 3130 fep->quirks = pdev->id_entry->driver_data; 3132 3131 3132 + fep->netdev = ndev; 3133 3133 fep->num_rx_queues = num_rx_qs; 3134 3134 fep->num_tx_queues = num_tx_qs; 3135 3135
+11
drivers/net/ethernet/intel/Kconfig
··· 281 281 282 282 If unsure, say N. 283 283 284 + config I40E_FCOE 285 + bool "Fibre Channel over Ethernet (FCoE)" 286 + default n 287 + depends on I40E && DCB && FCOE 288 + ---help--- 289 + Say Y here if you want to use Fibre Channel over Ethernet (FCoE) 290 + in the driver. This will create new netdev for exclusive FCoE 291 + use with XL710 FCoE offloads enabled. 292 + 293 + If unsure, say N. 294 + 284 295 config I40EVF 285 296 tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" 286 297 depends on PCI_MSI
+1 -1
drivers/net/ethernet/intel/e100.c
··· 1543 1543 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); 1544 1544 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && 1545 1545 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && 1546 - !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { 1546 + (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { 1547 1547 /* enable/disable MDI/MDI-X auto-switching. */ 1548 1548 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 1549 1549 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
+1 -1
drivers/net/ethernet/intel/i40e/Makefile
··· 44 44 i40e_virtchnl_pf.o 45 45 46 46 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o 47 - i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o 47 + i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
+3 -1
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
··· 829 829 if (desc_n >= ring->count || desc_n < 0) { 830 830 dev_info(&pf->pdev->dev, 831 831 "descriptor %d not found\n", desc_n); 832 - return; 832 + goto out; 833 833 } 834 834 if (!is_rx_ring) { 835 835 txd = I40E_TX_DESC(ring, desc_n); ··· 855 855 } else { 856 856 dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); 857 857 } 858 + 859 + out: 858 860 kfree(ring); 859 861 } 860 862
+2 -2
drivers/net/ethernet/intel/i40e/i40e_osdep.h
··· 78 78 } while (0) 79 79 80 80 typedef enum i40e_status_code i40e_status; 81 - #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 81 + #ifdef CONFIG_I40E_FCOE 82 82 #define I40E_FCOE 83 - #endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ 83 + #endif 84 84 #endif /* _I40E_OSDEP_H_ */
+72 -32
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 658 658 return le32_to_cpu(*(volatile __le32 *)head); 659 659 } 660 660 661 + #define WB_STRIDE 0x3 662 + 661 663 /** 662 664 * i40e_clean_tx_irq - Reclaim resources after transmit completes 663 665 * @tx_ring: tx ring to clean ··· 761 759 tx_ring->q_vector->tx.total_bytes += total_bytes; 762 760 tx_ring->q_vector->tx.total_packets += total_packets; 763 761 762 + /* check to see if there are any non-cache aligned descriptors 763 + * waiting to be written back, and kick the hardware to force 764 + * them to be written back in case of napi polling 765 + */ 766 + if (budget && 767 + !((i & WB_STRIDE) == WB_STRIDE) && 768 + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && 769 + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) 770 + tx_ring->arm_wb = true; 771 + else 772 + tx_ring->arm_wb = false; 773 + 764 774 if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { 765 775 /* schedule immediate reset if we believe we hung */ 766 776 dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" ··· 791 777 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); 792 778 793 779 dev_info(tx_ring->dev, 794 - "tx hang detected on queue %d, resetting adapter\n", 780 + "tx hang detected on queue %d, reset requested\n", 795 781 tx_ring->queue_index); 796 782 797 - tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); 783 + /* do not fire the reset immediately, wait for the stack to 784 + * decide we are truly stuck, also prevents every queue from 785 + * simultaneously requesting a reset 786 + */ 798 787 799 - /* the adapter is about to reset, no point in enabling stuff */ 800 - return true; 788 + /* the adapter is about to reset, no point in enabling polling */ 789 + budget = 1; 801 790 } 802 791 803 792 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, ··· 823 806 } 824 807 } 825 808 826 - return budget > 0; 809 + return !!budget; 810 + } 811 + 812 + /** 813 + * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors 814 + * @vsi: the 
VSI we care about 815 + * @q_vector: the vector on which to force writeback 816 + * 817 + **/ 818 + static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) 819 + { 820 + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | 821 + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | 822 + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK 823 + /* allow 00 to be written to the index */; 824 + 825 + wr32(&vsi->back->hw, 826 + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), 827 + val); 827 828 } 828 829 829 830 /** ··· 1325 1290 * so the total length of IPv4 header is IHL*4 bytes 1326 1291 * The UDP_0 bit *may* bet set if the *inner* header is UDP 1327 1292 */ 1328 - if (ipv4_tunnel && 1329 - (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && 1330 - !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { 1293 + if (ipv4_tunnel) { 1331 1294 skb->transport_header = skb->mac_header + 1332 1295 sizeof(struct ethhdr) + 1333 1296 (ip_hdr(skb)->ihl * 4); ··· 1335 1302 skb->protocol == htons(ETH_P_8021AD)) 1336 1303 ? 
VLAN_HLEN : 0; 1337 1304 1338 - rx_udp_csum = udp_csum(skb); 1339 - iph = ip_hdr(skb); 1340 - csum = csum_tcpudp_magic( 1341 - iph->saddr, iph->daddr, 1342 - (skb->len - skb_transport_offset(skb)), 1343 - IPPROTO_UDP, rx_udp_csum); 1305 + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && 1306 + (udp_hdr(skb)->check != 0)) { 1307 + rx_udp_csum = udp_csum(skb); 1308 + iph = ip_hdr(skb); 1309 + csum = csum_tcpudp_magic( 1310 + iph->saddr, iph->daddr, 1311 + (skb->len - skb_transport_offset(skb)), 1312 + IPPROTO_UDP, rx_udp_csum); 1344 1313 1345 - if (udp_hdr(skb)->check != csum) 1346 - goto checksum_fail; 1314 + if (udp_hdr(skb)->check != csum) 1315 + goto checksum_fail; 1316 + 1317 + } /* else its GRE and so no outer UDP header */ 1347 1318 } 1348 1319 1349 1320 skb->ip_summed = CHECKSUM_UNNECESSARY; ··· 1618 1581 struct i40e_vsi *vsi = q_vector->vsi; 1619 1582 struct i40e_ring *ring; 1620 1583 bool clean_complete = true; 1584 + bool arm_wb = false; 1621 1585 int budget_per_ring; 1622 1586 1623 1587 if (test_bit(__I40E_DOWN, &vsi->state)) { ··· 1629 1591 /* Since the actual Tx work is minimal, we can give the Tx a larger 1630 1592 * budget and be more aggressive about cleaning up the Tx descriptors. 1631 1593 */ 1632 - i40e_for_each_ring(ring, q_vector->tx) 1594 + i40e_for_each_ring(ring, q_vector->tx) { 1633 1595 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); 1596 + arm_wb |= ring->arm_wb; 1597 + } 1634 1598 1635 1599 /* We attempt to distribute budget to each Rx queue fairly, but don't 1636 1600 * allow the budget to go below 1 because that would exit polling early. 
··· 1643 1603 clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); 1644 1604 1645 1605 /* If work not completed, return budget and polling will return */ 1646 - if (!clean_complete) 1606 + if (!clean_complete) { 1607 + if (arm_wb) 1608 + i40e_force_wb(vsi, q_vector); 1647 1609 return budget; 1610 + } 1648 1611 1649 1612 /* Work is done so exit the polling mode and re-enable the interrupt */ 1650 1613 napi_complete(napi); ··· 1883 1840 if (err < 0) 1884 1841 return err; 1885 1842 1886 - if (protocol == htons(ETH_P_IP)) { 1887 - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1843 + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1844 + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); 1845 + 1846 + if (iph->version == 4) { 1888 1847 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1889 1848 iph->tot_len = 0; 1890 1849 iph->check = 0; 1891 1850 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1892 1851 0, IPPROTO_TCP, 0); 1893 - } else if (skb_is_gso_v6(skb)) { 1894 - 1895 - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) 1896 - : ipv6_hdr(skb); 1852 + } else if (ipv6h->version == 6) { 1897 1853 tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); 1898 1854 ipv6h->payload_len = 0; 1899 1855 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, ··· 1988 1946 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1989 1947 } 1990 1948 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1991 - if (tx_flags & I40E_TX_FLAGS_TSO) { 1992 - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1949 + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1950 + if (tx_flags & I40E_TX_FLAGS_TSO) 1993 1951 ip_hdr(skb)->check = 0; 1994 - } else { 1995 - *cd_tunneling |= 1996 - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1997 - } 1998 1952 } 1999 1953 2000 1954 /* Now set the ctx descriptor fields */ ··· 2000 1962 ((skb_inner_network_offset(skb) - 2001 1963 skb_transport_offset(skb)) >> 1) << 2002 1964 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 2003 - 1965 + if (this_ip_hdr->version == 6) { 1966 + tx_flags &= ~I40E_TX_FLAGS_IPV4; 1967 + tx_flags |= I40E_TX_FLAGS_IPV6; 1968 + } 2004 1969 } else { 2005 1970 network_hdr_len = skb_network_header_len(skb); 2006 1971 this_ip_hdr = ip_hdr(skb); ··· 2239 2198 /* Place RS bit on last descriptor of any packet that spans across the 2240 2199 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. 2241 2200 */ 2242 - #define WB_STRIDE 0x3 2243 2201 if (((i & WB_STRIDE) != WB_STRIDE) && 2244 2202 (first <= &tx_ring->tx_bi[i]) && 2245 2203 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+1
drivers/net/ethernet/intel/i40e/i40e_txrx.h
··· 241 241 unsigned long last_rx_timestamp; 242 242 243 243 bool ring_active; /* is ring online or not */ 244 + bool arm_wb; /* do something to arm write back */ 244 245 245 246 /* stats structs */ 246 247 struct i40e_queue_stats stats;
+1 -1
drivers/net/ethernet/intel/igb/e1000_82575.c
··· 1125 1125 u32 swmask = mask; 1126 1126 u32 fwmask = mask << 16; 1127 1127 s32 ret_val = 0; 1128 - s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ 1128 + s32 i = 0, timeout = 200; 1129 1129 1130 1130 while (i < timeout) { 1131 1131 if (igb_get_hw_semaphore(hw)) {
+6 -4
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 2365 2365 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2366 2366 } 2367 2367 2368 - static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) 2368 + static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, 2369 + struct net_device *dev, 2370 + netdev_features_t features) 2369 2371 { 2370 - return vxlan_gso_check(skb); 2372 + return vxlan_features_check(skb, features); 2371 2373 } 2372 2374 #endif 2373 2375 ··· 2402 2400 #ifdef CONFIG_MLX4_EN_VXLAN 2403 2401 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2404 2402 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, 2405 - .ndo_gso_check = mlx4_en_gso_check, 2403 + .ndo_features_check = mlx4_en_features_check, 2406 2404 #endif 2407 2405 }; 2408 2406 ··· 2436 2434 #ifdef CONFIG_MLX4_EN_VXLAN 2437 2435 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2438 2436 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, 2439 - .ndo_gso_check = mlx4_en_gso_check, 2437 + .ndo_features_check = mlx4_en_features_check, 2440 2438 #endif 2441 2439 }; 2442 2440
+11 -1
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 962 962 tx_desc->ctrl.owner_opcode = op_own; 963 963 if (send_doorbell) { 964 964 wmb(); 965 - iowrite32(ring->doorbell_qpn, 965 + /* Since there is no iowrite*_native() that writes the 966 + * value as is, without byteswapping - using the one 967 + * the doesn't do byteswapping in the relevant arch 968 + * endianness. 969 + */ 970 + #if defined(__LITTLE_ENDIAN) 971 + iowrite32( 972 + #else 973 + iowrite32be( 974 + #endif 975 + ring->doorbell_qpn, 966 976 ring->bf.uar->map + MLX4_SEND_DOORBELL); 967 977 } else { 968 978 ring->xmit_more++;
+4 -9
drivers/net/ethernet/mellanox/mlx4/main.c
··· 1829 1829 err = mlx4_dev_cap(dev, &dev_cap); 1830 1830 if (err) { 1831 1831 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 1832 - goto err_stop_fw; 1832 + return err; 1833 1833 } 1834 1834 1835 1835 choose_steering_mode(dev, &dev_cap); ··· 1860 1860 &init_hca); 1861 1861 if ((long long) icm_size < 0) { 1862 1862 err = icm_size; 1863 - goto err_stop_fw; 1863 + return err; 1864 1864 } 1865 1865 1866 1866 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; ··· 1874 1874 1875 1875 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1876 1876 if (err) 1877 - goto err_stop_fw; 1877 + return err; 1878 1878 1879 1879 err = mlx4_INIT_HCA(dev, &init_hca); 1880 1880 if (err) { ··· 1886 1886 err = mlx4_query_func(dev, &dev_cap); 1887 1887 if (err < 0) { 1888 1888 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 1889 - goto err_stop_fw; 1889 + goto err_close; 1890 1890 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 1891 1891 dev->caps.num_eqs = dev_cap.max_eqs; 1892 1892 dev->caps.reserved_eqs = dev_cap.reserved_eqs; ··· 2006 2006 if (!mlx4_is_slave(dev)) 2007 2007 mlx4_free_icms(dev); 2008 2008 2009 - err_stop_fw: 2010 - if (!mlx4_is_slave(dev)) { 2011 - mlx4_UNMAP_FA(dev); 2012 - mlx4_free_icm(dev, priv->fw.fw_icm, 0); 2013 - } 2014 2009 return err; 2015 2010 } 2016 2011
+5 -4
drivers/net/ethernet/mellanox/mlx4/mr.c
··· 584 584 void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) 585 585 { 586 586 mlx4_mtt_cleanup(dev, &mr->mtt); 587 + mr->mtt.order = -1; 587 588 } 588 589 EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); 589 590 ··· 594 593 { 595 594 int err; 596 595 597 - mpt_entry->start = cpu_to_be64(iova); 598 - mpt_entry->length = cpu_to_be64(size); 599 - mpt_entry->entity_size = cpu_to_be32(page_shift); 600 - 601 596 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); 602 597 if (err) 603 598 return err; 599 + 600 + mpt_entry->start = cpu_to_be64(mr->iova); 601 + mpt_entry->length = cpu_to_be64(mr->size); 602 + mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); 604 603 605 604 mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | 606 605 MLX4_MPT_PD_FLAG_EN_INV);
-6
drivers/net/ethernet/micrel/ksz884x.c
··· 2303 2303 2304 2304 /* Spanning Tree */ 2305 2305 2306 - static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set) 2307 - { 2308 - port_cfg(hw, p, 2309 - KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set); 2310 - } 2311 - 2312 2306 static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set) 2313 2307 { 2314 2308 port_cfg(hw, p,
+3 -1
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 4033 4033 (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4034 4034 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 4035 4035 &mgp->cmd_bus, GFP_KERNEL); 4036 - if (mgp->cmd == NULL) 4036 + if (!mgp->cmd) { 4037 + status = -ENOMEM; 4037 4038 goto abort_with_enabled; 4039 + } 4038 4040 4039 4041 mgp->board_span = pci_resource_len(pdev, 0); 4040 4042 mgp->iomem_base = pci_resource_start(pdev, 0);
+3 -5
drivers/net/ethernet/qlogic/qla3xxx.c
··· 146 146 { 147 147 int i = 0; 148 148 149 - while (i < 10) { 150 - if (i) 151 - ssleep(1); 152 - 149 + do { 153 150 if (ql_sem_lock(qdev, 154 151 QL_DRVR_SEM_MASK, 155 152 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) ··· 155 158 "driver lock acquired\n"); 156 159 return 1; 157 160 } 158 - } 161 + ssleep(1); 162 + } while (++i < 10); 159 163 160 164 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); 161 165 return 0;
+6 -3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 505 505 adapter->flags |= QLCNIC_DEL_VXLAN_PORT; 506 506 } 507 507 508 - static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) 508 + static netdev_features_t qlcnic_features_check(struct sk_buff *skb, 509 + struct net_device *dev, 510 + netdev_features_t features) 509 511 { 510 - return vxlan_gso_check(skb); 512 + return vxlan_features_check(skb, features); 511 513 } 512 514 #endif 513 515 ··· 534 532 #ifdef CONFIG_QLCNIC_VXLAN 535 533 .ndo_add_vxlan_port = qlcnic_add_vxlan_port, 536 534 .ndo_del_vxlan_port = qlcnic_del_vxlan_port, 537 - .ndo_gso_check = qlcnic_gso_check, 535 + .ndo_features_check = qlcnic_features_check, 538 536 #endif 539 537 #ifdef CONFIG_NET_POLL_CONTROLLER 540 538 .ndo_poll_controller = qlcnic_poll_controller, ··· 2605 2603 } else { 2606 2604 dev_err(&pdev->dev, 2607 2605 "%s: failed. Please Reboot\n", __func__); 2606 + err = -ENODEV; 2608 2607 goto err_out_free_hw; 2609 2608 } 2610 2609
+3 -1
drivers/net/ethernet/realtek/8139too.c
··· 787 787 if (rc) 788 788 goto err_out; 789 789 790 + disable_dev_on_err = 1; 790 791 rc = pci_request_regions (pdev, DRV_NAME); 791 792 if (rc) 792 793 goto err_out; 793 - disable_dev_on_err = 1; 794 794 795 795 pci_set_master (pdev); 796 796 ··· 1110 1110 return 0; 1111 1111 1112 1112 err_out: 1113 + netif_napi_del(&tp->napi); 1113 1114 __rtl8139_cleanup_dev (dev); 1114 1115 pci_disable_device (pdev); 1115 1116 return i; ··· 1125 1124 assert (dev != NULL); 1126 1125 1127 1126 cancel_delayed_work_sync(&tp->thread); 1127 + netif_napi_del(&tp->napi); 1128 1128 1129 1129 unregister_netdev (dev); 1130 1130
+8 -1
drivers/net/ethernet/renesas/sh_eth.c
··· 473 473 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 474 474 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 475 475 EESR_ECI, 476 + .fdr_value = 0x00000f0f, 476 477 477 478 .apr = 1, 478 479 .mpr = 1, ··· 496 495 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 497 496 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 498 497 EESR_ECI, 498 + .fdr_value = 0x00000f0f, 499 499 500 500 .apr = 1, 501 501 .mpr = 1, ··· 537 535 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | 538 536 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | 539 537 EESR_ECI, 538 + 539 + .trscer_err_mask = DESC_I_RINT8, 540 540 541 541 .apr = 1, 542 542 .mpr = 1, ··· 860 856 861 857 if (!cd->eesr_err_check) 862 858 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 859 + 860 + if (!cd->trscer_err_mask) 861 + cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; 863 862 } 864 863 865 864 static int sh_eth_check_reset(struct net_device *ndev) ··· 1301 1294 /* Frame recv control (enable multiple-packets per rx irq) */ 1302 1295 sh_eth_write(ndev, RMCR_RNC, RMCR); 1303 1296 1304 - sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1297 + sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); 1305 1298 1306 1299 if (mdp->cd->bculr) 1307 1300 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
+5
drivers/net/ethernet/renesas/sh_eth.h
··· 369 369 DESC_I_RINT1 = 0x0001, 370 370 }; 371 371 372 + #define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) 373 + 372 374 /* RPADIR */ 373 375 enum RPADIR_BIT { 374 376 RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, ··· 471 469 /* interrupt checking mask */ 472 470 unsigned long tx_check; 473 471 unsigned long eesr_err_check; 472 + 473 + /* Error mask */ 474 + unsigned long trscer_err_mask; 474 475 475 476 /* hardware features */ 476 477 unsigned long irq_flags; /* IRQ configuration flags */
-1058
drivers/net/ethernet/s6gmac.c
··· 1 - /* 2 - * Ethernet driver for S6105 on chip network device 3 - * (c)2008 emlix GmbH http://www.emlix.com 4 - * Authors: Oskar Schirmer <oskar@scara.com> 5 - * Daniel Gloeckner <dg@emlix.com> 6 - * 7 - * This program is free software; you can redistribute it and/or 8 - * modify it under the terms of the GNU General Public License 9 - * as published by the Free Software Foundation; either version 10 - * 2 of the License, or (at your option) any later version. 11 - */ 12 - #include <linux/kernel.h> 13 - #include <linux/module.h> 14 - #include <linux/interrupt.h> 15 - #include <linux/types.h> 16 - #include <linux/delay.h> 17 - #include <linux/spinlock.h> 18 - #include <linux/netdevice.h> 19 - #include <linux/etherdevice.h> 20 - #include <linux/if.h> 21 - #include <linux/stddef.h> 22 - #include <linux/mii.h> 23 - #include <linux/phy.h> 24 - #include <linux/platform_device.h> 25 - #include <variant/hardware.h> 26 - #include <variant/dmac.h> 27 - 28 - #define DRV_NAME "s6gmac" 29 - #define DRV_PRMT DRV_NAME ": " 30 - 31 - 32 - /* register declarations */ 33 - 34 - #define S6_GMAC_MACCONF1 0x000 35 - #define S6_GMAC_MACCONF1_TXENA 0 36 - #define S6_GMAC_MACCONF1_SYNCTX 1 37 - #define S6_GMAC_MACCONF1_RXENA 2 38 - #define S6_GMAC_MACCONF1_SYNCRX 3 39 - #define S6_GMAC_MACCONF1_TXFLOWCTRL 4 40 - #define S6_GMAC_MACCONF1_RXFLOWCTRL 5 41 - #define S6_GMAC_MACCONF1_LOOPBACK 8 42 - #define S6_GMAC_MACCONF1_RESTXFUNC 16 43 - #define S6_GMAC_MACCONF1_RESRXFUNC 17 44 - #define S6_GMAC_MACCONF1_RESTXMACCTRL 18 45 - #define S6_GMAC_MACCONF1_RESRXMACCTRL 19 46 - #define S6_GMAC_MACCONF1_SIMULRES 30 47 - #define S6_GMAC_MACCONF1_SOFTRES 31 48 - #define S6_GMAC_MACCONF2 0x004 49 - #define S6_GMAC_MACCONF2_FULL 0 50 - #define S6_GMAC_MACCONF2_CRCENA 1 51 - #define S6_GMAC_MACCONF2_PADCRCENA 2 52 - #define S6_GMAC_MACCONF2_LENGTHFCHK 4 53 - #define S6_GMAC_MACCONF2_HUGEFRAMENA 5 54 - #define S6_GMAC_MACCONF2_IFMODE 8 55 - #define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1 56 - #define 
S6_GMAC_MACCONF2_IFMODE_BYTE 2 57 - #define S6_GMAC_MACCONF2_IFMODE_MASK 3 58 - #define S6_GMAC_MACCONF2_PREAMBLELEN 12 59 - #define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F 60 - #define S6_GMAC_MACIPGIFG 0x008 61 - #define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0 62 - #define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F 63 - #define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8 64 - #define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16 65 - #define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24 66 - #define S6_GMAC_MACHALFDUPLEX 0x00C 67 - #define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0 68 - #define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F 69 - #define S6_GMAC_MACHALFDUPLEX_RETXMAX 12 70 - #define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F 71 - #define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16 72 - #define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17 73 - #define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18 74 - #define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19 75 - #define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20 76 - #define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F 77 - #define S6_GMAC_MACMAXFRAMELEN 0x010 78 - #define S6_GMAC_MACMIICONF 0x020 79 - #define S6_GMAC_MACMIICONF_CSEL 0 80 - #define S6_GMAC_MACMIICONF_CSEL_DIV10 0 81 - #define S6_GMAC_MACMIICONF_CSEL_DIV12 1 82 - #define S6_GMAC_MACMIICONF_CSEL_DIV14 2 83 - #define S6_GMAC_MACMIICONF_CSEL_DIV18 3 84 - #define S6_GMAC_MACMIICONF_CSEL_DIV24 4 85 - #define S6_GMAC_MACMIICONF_CSEL_DIV34 5 86 - #define S6_GMAC_MACMIICONF_CSEL_DIV68 6 87 - #define S6_GMAC_MACMIICONF_CSEL_DIV168 7 88 - #define S6_GMAC_MACMIICONF_CSEL_MASK 7 89 - #define S6_GMAC_MACMIICONF_PREAMBLESUPR 4 90 - #define S6_GMAC_MACMIICONF_SCANAUTOINCR 5 91 - #define S6_GMAC_MACMIICMD 0x024 92 - #define S6_GMAC_MACMIICMD_READ 0 93 - #define S6_GMAC_MACMIICMD_SCAN 1 94 - #define S6_GMAC_MACMIIADDR 0x028 95 - #define S6_GMAC_MACMIIADDR_REG 0 96 - #define S6_GMAC_MACMIIADDR_REG_MASK 0x1F 97 - #define S6_GMAC_MACMIIADDR_PHY 8 98 - #define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F 99 - #define S6_GMAC_MACMIICTRL 0x02C 100 - #define S6_GMAC_MACMIISTAT 
0x030 101 - #define S6_GMAC_MACMIIINDI 0x034 102 - #define S6_GMAC_MACMIIINDI_BUSY 0 103 - #define S6_GMAC_MACMIIINDI_SCAN 1 104 - #define S6_GMAC_MACMIIINDI_INVAL 2 105 - #define S6_GMAC_MACINTERFSTAT 0x03C 106 - #define S6_GMAC_MACINTERFSTAT_LINKFAIL 3 107 - #define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9 108 - #define S6_GMAC_MACSTATADDR1 0x040 109 - #define S6_GMAC_MACSTATADDR2 0x044 110 - 111 - #define S6_GMAC_FIFOCONF0 0x048 112 - #define S6_GMAC_FIFOCONF0_HSTRSTWT 0 113 - #define S6_GMAC_FIFOCONF0_HSTRSTSR 1 114 - #define S6_GMAC_FIFOCONF0_HSTRSTFR 2 115 - #define S6_GMAC_FIFOCONF0_HSTRSTST 3 116 - #define S6_GMAC_FIFOCONF0_HSTRSTFT 4 117 - #define S6_GMAC_FIFOCONF0_WTMENREQ 8 118 - #define S6_GMAC_FIFOCONF0_SRFENREQ 9 119 - #define S6_GMAC_FIFOCONF0_FRFENREQ 10 120 - #define S6_GMAC_FIFOCONF0_STFENREQ 11 121 - #define S6_GMAC_FIFOCONF0_FTFENREQ 12 122 - #define S6_GMAC_FIFOCONF0_WTMENRPLY 16 123 - #define S6_GMAC_FIFOCONF0_SRFENRPLY 17 124 - #define S6_GMAC_FIFOCONF0_FRFENRPLY 18 125 - #define S6_GMAC_FIFOCONF0_STFENRPLY 19 126 - #define S6_GMAC_FIFOCONF0_FTFENRPLY 20 127 - #define S6_GMAC_FIFOCONF1 0x04C 128 - #define S6_GMAC_FIFOCONF2 0x050 129 - #define S6_GMAC_FIFOCONF2_CFGLWM 0 130 - #define S6_GMAC_FIFOCONF2_CFGHWM 16 131 - #define S6_GMAC_FIFOCONF3 0x054 132 - #define S6_GMAC_FIFOCONF3_CFGFTTH 0 133 - #define S6_GMAC_FIFOCONF3_CFGHWMFT 16 134 - #define S6_GMAC_FIFOCONF4 0x058 135 - #define S6_GMAC_FIFOCONF_RSV_PREVDROP 0 136 - #define S6_GMAC_FIFOCONF_RSV_RUNT 1 137 - #define S6_GMAC_FIFOCONF_RSV_FALSECAR 2 138 - #define S6_GMAC_FIFOCONF_RSV_CODEERR 3 139 - #define S6_GMAC_FIFOCONF_RSV_CRCERR 4 140 - #define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5 141 - #define S6_GMAC_FIFOCONF_RSV_LENRANGE 6 142 - #define S6_GMAC_FIFOCONF_RSV_OK 7 143 - #define S6_GMAC_FIFOCONF_RSV_MULTICAST 8 144 - #define S6_GMAC_FIFOCONF_RSV_BROADCAST 9 145 - #define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10 146 - #define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11 147 - #define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12 
148 - #define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13 149 - #define S6_GMAC_FIFOCONF_RSV_VLANTAG 14 150 - #define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15 151 - #define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16 152 - #define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF 153 - #define S6_GMAC_FIFOCONF5 0x05C 154 - #define S6_GMAC_FIFOCONF5_DROPLT64 18 155 - #define S6_GMAC_FIFOCONF5_CFGBYTM 19 156 - #define S6_GMAC_FIFOCONF5_RXDROPSIZE 20 157 - #define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF 158 - 159 - #define S6_GMAC_STAT_REGS 0x080 160 - #define S6_GMAC_STAT_SIZE_MIN 12 161 - #define S6_GMAC_STATTR64 0x080 162 - #define S6_GMAC_STATTR64_SIZE 18 163 - #define S6_GMAC_STATTR127 0x084 164 - #define S6_GMAC_STATTR127_SIZE 18 165 - #define S6_GMAC_STATTR255 0x088 166 - #define S6_GMAC_STATTR255_SIZE 18 167 - #define S6_GMAC_STATTR511 0x08C 168 - #define S6_GMAC_STATTR511_SIZE 18 169 - #define S6_GMAC_STATTR1K 0x090 170 - #define S6_GMAC_STATTR1K_SIZE 18 171 - #define S6_GMAC_STATTRMAX 0x094 172 - #define S6_GMAC_STATTRMAX_SIZE 18 173 - #define S6_GMAC_STATTRMGV 0x098 174 - #define S6_GMAC_STATTRMGV_SIZE 18 175 - #define S6_GMAC_STATRBYT 0x09C 176 - #define S6_GMAC_STATRBYT_SIZE 24 177 - #define S6_GMAC_STATRPKT 0x0A0 178 - #define S6_GMAC_STATRPKT_SIZE 18 179 - #define S6_GMAC_STATRFCS 0x0A4 180 - #define S6_GMAC_STATRFCS_SIZE 12 181 - #define S6_GMAC_STATRMCA 0x0A8 182 - #define S6_GMAC_STATRMCA_SIZE 18 183 - #define S6_GMAC_STATRBCA 0x0AC 184 - #define S6_GMAC_STATRBCA_SIZE 22 185 - #define S6_GMAC_STATRXCF 0x0B0 186 - #define S6_GMAC_STATRXCF_SIZE 18 187 - #define S6_GMAC_STATRXPF 0x0B4 188 - #define S6_GMAC_STATRXPF_SIZE 12 189 - #define S6_GMAC_STATRXUO 0x0B8 190 - #define S6_GMAC_STATRXUO_SIZE 12 191 - #define S6_GMAC_STATRALN 0x0BC 192 - #define S6_GMAC_STATRALN_SIZE 12 193 - #define S6_GMAC_STATRFLR 0x0C0 194 - #define S6_GMAC_STATRFLR_SIZE 16 195 - #define S6_GMAC_STATRCDE 0x0C4 196 - #define S6_GMAC_STATRCDE_SIZE 12 197 - #define S6_GMAC_STATRCSE 0x0C8 198 - #define S6_GMAC_STATRCSE_SIZE 12 
199 - #define S6_GMAC_STATRUND 0x0CC 200 - #define S6_GMAC_STATRUND_SIZE 12 201 - #define S6_GMAC_STATROVR 0x0D0 202 - #define S6_GMAC_STATROVR_SIZE 12 203 - #define S6_GMAC_STATRFRG 0x0D4 204 - #define S6_GMAC_STATRFRG_SIZE 12 205 - #define S6_GMAC_STATRJBR 0x0D8 206 - #define S6_GMAC_STATRJBR_SIZE 12 207 - #define S6_GMAC_STATRDRP 0x0DC 208 - #define S6_GMAC_STATRDRP_SIZE 12 209 - #define S6_GMAC_STATTBYT 0x0E0 210 - #define S6_GMAC_STATTBYT_SIZE 24 211 - #define S6_GMAC_STATTPKT 0x0E4 212 - #define S6_GMAC_STATTPKT_SIZE 18 213 - #define S6_GMAC_STATTMCA 0x0E8 214 - #define S6_GMAC_STATTMCA_SIZE 18 215 - #define S6_GMAC_STATTBCA 0x0EC 216 - #define S6_GMAC_STATTBCA_SIZE 18 217 - #define S6_GMAC_STATTXPF 0x0F0 218 - #define S6_GMAC_STATTXPF_SIZE 12 219 - #define S6_GMAC_STATTDFR 0x0F4 220 - #define S6_GMAC_STATTDFR_SIZE 12 221 - #define S6_GMAC_STATTEDF 0x0F8 222 - #define S6_GMAC_STATTEDF_SIZE 12 223 - #define S6_GMAC_STATTSCL 0x0FC 224 - #define S6_GMAC_STATTSCL_SIZE 12 225 - #define S6_GMAC_STATTMCL 0x100 226 - #define S6_GMAC_STATTMCL_SIZE 12 227 - #define S6_GMAC_STATTLCL 0x104 228 - #define S6_GMAC_STATTLCL_SIZE 12 229 - #define S6_GMAC_STATTXCL 0x108 230 - #define S6_GMAC_STATTXCL_SIZE 12 231 - #define S6_GMAC_STATTNCL 0x10C 232 - #define S6_GMAC_STATTNCL_SIZE 13 233 - #define S6_GMAC_STATTPFH 0x110 234 - #define S6_GMAC_STATTPFH_SIZE 12 235 - #define S6_GMAC_STATTDRP 0x114 236 - #define S6_GMAC_STATTDRP_SIZE 12 237 - #define S6_GMAC_STATTJBR 0x118 238 - #define S6_GMAC_STATTJBR_SIZE 12 239 - #define S6_GMAC_STATTFCS 0x11C 240 - #define S6_GMAC_STATTFCS_SIZE 12 241 - #define S6_GMAC_STATTXCF 0x120 242 - #define S6_GMAC_STATTXCF_SIZE 12 243 - #define S6_GMAC_STATTOVR 0x124 244 - #define S6_GMAC_STATTOVR_SIZE 12 245 - #define S6_GMAC_STATTUND 0x128 246 - #define S6_GMAC_STATTUND_SIZE 12 247 - #define S6_GMAC_STATTFRG 0x12C 248 - #define S6_GMAC_STATTFRG_SIZE 12 249 - #define S6_GMAC_STATCARRY(n) (0x130 + 4*(n)) 250 - #define S6_GMAC_STATCARRYMSK(n) (0x138 + 
4*(n)) 251 - #define S6_GMAC_STATCARRY1_RDRP 0 252 - #define S6_GMAC_STATCARRY1_RJBR 1 253 - #define S6_GMAC_STATCARRY1_RFRG 2 254 - #define S6_GMAC_STATCARRY1_ROVR 3 255 - #define S6_GMAC_STATCARRY1_RUND 4 256 - #define S6_GMAC_STATCARRY1_RCSE 5 257 - #define S6_GMAC_STATCARRY1_RCDE 6 258 - #define S6_GMAC_STATCARRY1_RFLR 7 259 - #define S6_GMAC_STATCARRY1_RALN 8 260 - #define S6_GMAC_STATCARRY1_RXUO 9 261 - #define S6_GMAC_STATCARRY1_RXPF 10 262 - #define S6_GMAC_STATCARRY1_RXCF 11 263 - #define S6_GMAC_STATCARRY1_RBCA 12 264 - #define S6_GMAC_STATCARRY1_RMCA 13 265 - #define S6_GMAC_STATCARRY1_RFCS 14 266 - #define S6_GMAC_STATCARRY1_RPKT 15 267 - #define S6_GMAC_STATCARRY1_RBYT 16 268 - #define S6_GMAC_STATCARRY1_TRMGV 25 269 - #define S6_GMAC_STATCARRY1_TRMAX 26 270 - #define S6_GMAC_STATCARRY1_TR1K 27 271 - #define S6_GMAC_STATCARRY1_TR511 28 272 - #define S6_GMAC_STATCARRY1_TR255 29 273 - #define S6_GMAC_STATCARRY1_TR127 30 274 - #define S6_GMAC_STATCARRY1_TR64 31 275 - #define S6_GMAC_STATCARRY2_TDRP 0 276 - #define S6_GMAC_STATCARRY2_TPFH 1 277 - #define S6_GMAC_STATCARRY2_TNCL 2 278 - #define S6_GMAC_STATCARRY2_TXCL 3 279 - #define S6_GMAC_STATCARRY2_TLCL 4 280 - #define S6_GMAC_STATCARRY2_TMCL 5 281 - #define S6_GMAC_STATCARRY2_TSCL 6 282 - #define S6_GMAC_STATCARRY2_TEDF 7 283 - #define S6_GMAC_STATCARRY2_TDFR 8 284 - #define S6_GMAC_STATCARRY2_TXPF 9 285 - #define S6_GMAC_STATCARRY2_TBCA 10 286 - #define S6_GMAC_STATCARRY2_TMCA 11 287 - #define S6_GMAC_STATCARRY2_TPKT 12 288 - #define S6_GMAC_STATCARRY2_TBYT 13 289 - #define S6_GMAC_STATCARRY2_TFRG 14 290 - #define S6_GMAC_STATCARRY2_TUND 15 291 - #define S6_GMAC_STATCARRY2_TOVR 16 292 - #define S6_GMAC_STATCARRY2_TXCF 17 293 - #define S6_GMAC_STATCARRY2_TFCS 18 294 - #define S6_GMAC_STATCARRY2_TJBR 19 295 - 296 - #define S6_GMAC_HOST_PBLKCTRL 0x140 297 - #define S6_GMAC_HOST_PBLKCTRL_TXENA 0 298 - #define S6_GMAC_HOST_PBLKCTRL_RXENA 1 299 - #define S6_GMAC_HOST_PBLKCTRL_TXSRES 2 300 - #define 
S6_GMAC_HOST_PBLKCTRL_RXSRES 3 301 - #define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8 302 - #define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12 303 - #define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4 304 - #define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5 305 - #define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6 306 - #define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7 307 - #define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF 308 - #define S6_GMAC_HOST_PBLKCTRL_STATENA 16 309 - #define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17 310 - #define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18 311 - #define S6_GMAC_HOST_PBLKCTRL_RGMII 19 312 - #define S6_GMAC_HOST_INTMASK 0x144 313 - #define S6_GMAC_HOST_INTSTAT 0x148 314 - #define S6_GMAC_HOST_INT_TXBURSTOVER 3 315 - #define S6_GMAC_HOST_INT_TXPREWOVER 4 316 - #define S6_GMAC_HOST_INT_RXBURSTUNDER 5 317 - #define S6_GMAC_HOST_INT_RXPOSTRFULL 6 318 - #define S6_GMAC_HOST_INT_RXPOSTRUNDER 7 319 - #define S6_GMAC_HOST_RXFIFOHWM 0x14C 320 - #define S6_GMAC_HOST_CTRLFRAMXP 0x150 321 - #define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n)) 322 - #define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n)) 323 - #define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n)) 324 - #define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n)) 325 - 326 - #define S6_GMAC_BURST_PREWR 0x1B0 327 - #define S6_GMAC_BURST_PREWR_LEN 0 328 - #define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1) 329 - #define S6_GMAC_BURST_PREWR_CFE 20 330 - #define S6_GMAC_BURST_PREWR_PPE 21 331 - #define S6_GMAC_BURST_PREWR_FCS 22 332 - #define S6_GMAC_BURST_PREWR_PAD 23 333 - #define S6_GMAC_BURST_POSTRD 0x1D0 334 - #define S6_GMAC_BURST_POSTRD_LEN 0 335 - #define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1) 336 - #define S6_GMAC_BURST_POSTRD_DROP 20 337 - 338 - 339 - /* data handling */ 340 - 341 - #define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */ 342 - #define S6_NUM_RX_SKB 16 343 - #define S6_MAX_FRLEN 1536 344 - 345 - struct s6gmac { 346 - u32 reg; 347 - u32 tx_dma; 348 - u32 rx_dma; 349 - u32 io; 350 - u8 tx_chan; 351 - u8 rx_chan; 352 - spinlock_t lock; 353 - u8 
tx_skb_i, tx_skb_o; 354 - u8 rx_skb_i, rx_skb_o; 355 - struct sk_buff *tx_skb[S6_NUM_TX_SKB]; 356 - struct sk_buff *rx_skb[S6_NUM_RX_SKB]; 357 - unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)]; 358 - unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)]; 359 - struct phy_device *phydev; 360 - struct { 361 - struct mii_bus *bus; 362 - int irq[PHY_MAX_ADDR]; 363 - } mii; 364 - struct { 365 - unsigned int mbit; 366 - u8 giga; 367 - u8 isup; 368 - u8 full; 369 - } link; 370 - }; 371 - 372 - static void s6gmac_rx_fillfifo(struct net_device *dev) 373 - { 374 - struct s6gmac *pd = netdev_priv(dev); 375 - struct sk_buff *skb; 376 - while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) && 377 - (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) && 378 - (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) { 379 - pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb; 380 - s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan, 381 - pd->io, (u32)skb->data, S6_MAX_FRLEN); 382 - } 383 - } 384 - 385 - static void s6gmac_rx_interrupt(struct net_device *dev) 386 - { 387 - struct s6gmac *pd = netdev_priv(dev); 388 - u32 pfx; 389 - struct sk_buff *skb; 390 - while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) > 391 - s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) { 392 - skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]; 393 - pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD); 394 - if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) { 395 - dev_kfree_skb_irq(skb); 396 - } else { 397 - skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) 398 - & S6_GMAC_BURST_POSTRD_LEN_MASK); 399 - skb->protocol = eth_type_trans(skb, dev); 400 - skb->ip_summed = CHECKSUM_UNNECESSARY; 401 - netif_rx(skb); 402 - } 403 - } 404 - } 405 - 406 - static void s6gmac_tx_interrupt(struct net_device *dev) 407 - { 408 - struct s6gmac *pd = netdev_priv(dev); 409 - while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) > 410 - s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) { 411 - dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % 
S6_NUM_TX_SKB]); 412 - } 413 - if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) 414 - netif_wake_queue(dev); 415 - } 416 - 417 - struct s6gmac_statinf { 418 - unsigned reg_size : 4; /* 0: unused */ 419 - unsigned reg_off : 6; 420 - unsigned net_index : 6; 421 - }; 422 - 423 - #define S6_STATS_B (8 * sizeof(u32)) 424 - #define S6_STATS_C(b, r, f) [b] = { \ 425 - BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \ 426 - BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \ 427 - >= (1<<4)) + \ 428 - r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \ 429 - BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \ 430 - >= ((1<<6)-1)) + \ 431 - (r - S6_GMAC_STAT_REGS) / sizeof(u32), \ 432 - BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \ 433 - % sizeof(unsigned long)) + \ 434 - BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \ 435 - / sizeof(unsigned long)) >= (1<<6))) + \ 436 - BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \ 437 - != sizeof(unsigned long))) + \ 438 - (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)}, 439 - 440 - static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { { 441 - S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes) 442 - S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets) 443 - S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors) 444 - S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast) 445 - S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors) 446 - S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors) 447 - S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors) 448 - S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors) 449 - S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors) 450 - S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors) 451 - S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, 
rx_crc_errors) 452 - S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped) 453 - }, { 454 - S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes) 455 - S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets) 456 - S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors) 457 - S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors) 458 - S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions) 459 - S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped) 460 - S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors) 461 - S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors) 462 - S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors) 463 - S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors) 464 - S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors) 465 - } }; 466 - 467 - static void s6gmac_stats_collect(struct s6gmac *pd, 468 - const struct s6gmac_statinf *inf) 469 - { 470 - int b; 471 - for (b = 0; b < S6_STATS_B; b++) { 472 - if (inf[b].reg_size) { 473 - pd->stats[inf[b].net_index] += 474 - readl(pd->reg + S6_GMAC_STAT_REGS 475 - + sizeof(u32) * inf[b].reg_off); 476 - } 477 - } 478 - } 479 - 480 - static void s6gmac_stats_carry(struct s6gmac *pd, 481 - const struct s6gmac_statinf *inf, u32 mask) 482 - { 483 - int b; 484 - while (mask) { 485 - b = fls(mask) - 1; 486 - mask &= ~(1 << b); 487 - pd->carry[inf[b].net_index] += (1 << inf[b].reg_size); 488 - } 489 - } 490 - 491 - static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry) 492 - { 493 - int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) & 494 - ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry)); 495 - return r; 496 - } 497 - 498 - static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry) 499 - { 500 - u32 mask; 501 - mask = s6gmac_stats_pending(pd, carry); 502 - if (mask) { 503 - writel(mask, pd->reg + S6_GMAC_STATCARRY(carry)); 504 - 
s6gmac_stats_carry(pd, &statinf[carry][0], mask); 505 - } 506 - } 507 - 508 - static irqreturn_t s6gmac_interrupt(int irq, void *dev_id) 509 - { 510 - struct net_device *dev = (struct net_device *)dev_id; 511 - struct s6gmac *pd = netdev_priv(dev); 512 - if (!dev) 513 - return IRQ_NONE; 514 - spin_lock(&pd->lock); 515 - if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan)) 516 - s6gmac_rx_interrupt(dev); 517 - s6gmac_rx_fillfifo(dev); 518 - if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan)) 519 - s6gmac_tx_interrupt(dev); 520 - s6gmac_stats_interrupt(pd, 0); 521 - s6gmac_stats_interrupt(pd, 1); 522 - spin_unlock(&pd->lock); 523 - return IRQ_HANDLED; 524 - } 525 - 526 - static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n, 527 - u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi) 528 - { 529 - writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n)); 530 - writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n)); 531 - writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n)); 532 - writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n)); 533 - } 534 - 535 - static inline void s6gmac_stop_device(struct net_device *dev) 536 - { 537 - struct s6gmac *pd = netdev_priv(dev); 538 - writel(0, pd->reg + S6_GMAC_MACCONF1); 539 - } 540 - 541 - static inline void s6gmac_init_device(struct net_device *dev) 542 - { 543 - struct s6gmac *pd = netdev_priv(dev); 544 - int is_rgmii = !!(pd->phydev->supported 545 - & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)); 546 - #if 0 547 - writel(1 << S6_GMAC_MACCONF1_SYNCTX | 548 - 1 << S6_GMAC_MACCONF1_SYNCRX | 549 - 1 << S6_GMAC_MACCONF1_TXFLOWCTRL | 550 - 1 << S6_GMAC_MACCONF1_RXFLOWCTRL | 551 - 1 << S6_GMAC_MACCONF1_RESTXFUNC | 552 - 1 << S6_GMAC_MACCONF1_RESRXFUNC | 553 - 1 << S6_GMAC_MACCONF1_RESTXMACCTRL | 554 - 1 << S6_GMAC_MACCONF1_RESRXMACCTRL, 555 - pd->reg + S6_GMAC_MACCONF1); 556 - #endif 557 - writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1); 558 - udelay(1000); 559 - writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << 
S6_GMAC_MACCONF1_RXENA, 560 - pd->reg + S6_GMAC_MACCONF1); 561 - writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES | 562 - 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES, 563 - pd->reg + S6_GMAC_HOST_PBLKCTRL); 564 - writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | 565 - S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | 566 - 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | 567 - 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | 568 - is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, 569 - pd->reg + S6_GMAC_HOST_PBLKCTRL); 570 - writel(1 << S6_GMAC_MACCONF1_TXENA | 571 - 1 << S6_GMAC_MACCONF1_RXENA | 572 - (dev->flags & IFF_LOOPBACK ? 1 : 0) 573 - << S6_GMAC_MACCONF1_LOOPBACK, 574 - pd->reg + S6_GMAC_MACCONF1); 575 - writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ? 576 - dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN, 577 - pd->reg + S6_GMAC_MACMAXFRAMELEN); 578 - writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL | 579 - 1 << S6_GMAC_MACCONF2_PADCRCENA | 580 - 1 << S6_GMAC_MACCONF2_LENGTHFCHK | 581 - (pd->link.giga ? 
582 - S6_GMAC_MACCONF2_IFMODE_BYTE : 583 - S6_GMAC_MACCONF2_IFMODE_NIBBLE) 584 - << S6_GMAC_MACCONF2_IFMODE | 585 - 7 << S6_GMAC_MACCONF2_PREAMBLELEN, 586 - pd->reg + S6_GMAC_MACCONF2); 587 - writel(0, pd->reg + S6_GMAC_MACSTATADDR1); 588 - writel(0, pd->reg + S6_GMAC_MACSTATADDR2); 589 - writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ | 590 - 1 << S6_GMAC_FIFOCONF0_SRFENREQ | 591 - 1 << S6_GMAC_FIFOCONF0_FRFENREQ | 592 - 1 << S6_GMAC_FIFOCONF0_STFENREQ | 593 - 1 << S6_GMAC_FIFOCONF0_FTFENREQ, 594 - pd->reg + S6_GMAC_FIFOCONF0); 595 - writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH | 596 - 128 << S6_GMAC_FIFOCONF3_CFGHWMFT, 597 - pd->reg + S6_GMAC_FIFOCONF3); 598 - writel((S6_GMAC_FIFOCONF_RSV_MASK & ~( 599 - 1 << S6_GMAC_FIFOCONF_RSV_RUNT | 600 - 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | 601 - 1 << S6_GMAC_FIFOCONF_RSV_OK | 602 - 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | 603 - 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | 604 - 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | 605 - 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | 606 - 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) | 607 - 1 << S6_GMAC_FIFOCONF5_DROPLT64 | 608 - pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM | 609 - 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE, 610 - pd->reg + S6_GMAC_FIFOCONF5); 611 - writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT | 612 - 1 << S6_GMAC_FIFOCONF_RSV_CRCERR | 613 - 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE | 614 - 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME | 615 - 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL | 616 - 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE | 617 - 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED, 618 - pd->reg + S6_GMAC_FIFOCONF4); 619 - s6gmac_set_dstaddr(pd, 0, 620 - 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF); 621 - s6gmac_set_dstaddr(pd, 1, 622 - dev->dev_addr[5] | 623 - dev->dev_addr[4] << 8 | 624 - dev->dev_addr[3] << 16 | 625 - dev->dev_addr[2] << 24, 626 - dev->dev_addr[1] | 627 - dev->dev_addr[0] << 8, 628 - 0xFFFFFFFF, 0x0000FFFF); 629 - s6gmac_set_dstaddr(pd, 2, 630 - 0x00000000, 0x00000100, 0x00000000, 0x00000100); 631 - s6gmac_set_dstaddr(pd, 3, 632 - 0x00000000, 
0x00000000, 0x00000000, 0x00000000); 633 - writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA | 634 - 1 << S6_GMAC_HOST_PBLKCTRL_RXENA | 635 - S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ | 636 - S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ | 637 - 1 << S6_GMAC_HOST_PBLKCTRL_STATENA | 638 - 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR | 639 - is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII, 640 - pd->reg + S6_GMAC_HOST_PBLKCTRL); 641 - } 642 - 643 - static void s6mii_enable(struct s6gmac *pd) 644 - { 645 - writel(readl(pd->reg + S6_GMAC_MACCONF1) & 646 - ~(1 << S6_GMAC_MACCONF1_SOFTRES), 647 - pd->reg + S6_GMAC_MACCONF1); 648 - writel((readl(pd->reg + S6_GMAC_MACMIICONF) 649 - & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL)) 650 - | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL), 651 - pd->reg + S6_GMAC_MACMIICONF); 652 - } 653 - 654 - static int s6mii_busy(struct s6gmac *pd, int tmo) 655 - { 656 - while (readl(pd->reg + S6_GMAC_MACMIIINDI)) { 657 - if (--tmo == 0) 658 - return -ETIME; 659 - udelay(64); 660 - } 661 - return 0; 662 - } 663 - 664 - static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum) 665 - { 666 - struct s6gmac *pd = bus->priv; 667 - s6mii_enable(pd); 668 - if (s6mii_busy(pd, 256)) 669 - return -ETIME; 670 - writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | 671 - regnum << S6_GMAC_MACMIIADDR_REG, 672 - pd->reg + S6_GMAC_MACMIIADDR); 673 - writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD); 674 - writel(0, pd->reg + S6_GMAC_MACMIICMD); 675 - if (s6mii_busy(pd, 256)) 676 - return -ETIME; 677 - return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT); 678 - } 679 - 680 - static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) 681 - { 682 - struct s6gmac *pd = bus->priv; 683 - s6mii_enable(pd); 684 - if (s6mii_busy(pd, 256)) 685 - return -ETIME; 686 - writel(phy_addr << S6_GMAC_MACMIIADDR_PHY | 687 - regnum << S6_GMAC_MACMIIADDR_REG, 688 - pd->reg + S6_GMAC_MACMIIADDR); 689 - 
writel(value, pd->reg + S6_GMAC_MACMIICTRL); 690 - if (s6mii_busy(pd, 256)) 691 - return -ETIME; 692 - return 0; 693 - } 694 - 695 - static int s6mii_reset(struct mii_bus *bus) 696 - { 697 - struct s6gmac *pd = bus->priv; 698 - s6mii_enable(pd); 699 - if (s6mii_busy(pd, PHY_INIT_TIMEOUT)) 700 - return -ETIME; 701 - return 0; 702 - } 703 - 704 - static void s6gmac_set_rgmii_txclock(struct s6gmac *pd) 705 - { 706 - u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL); 707 - pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC); 708 - switch (pd->link.mbit) { 709 - case 10: 710 - pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC; 711 - break; 712 - case 100: 713 - pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC; 714 - break; 715 - case 1000: 716 - pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC; 717 - break; 718 - default: 719 - return; 720 - } 721 - writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL); 722 - } 723 - 724 - static inline void s6gmac_linkisup(struct net_device *dev, int isup) 725 - { 726 - struct s6gmac *pd = netdev_priv(dev); 727 - struct phy_device *phydev = pd->phydev; 728 - 729 - pd->link.full = phydev->duplex; 730 - pd->link.giga = (phydev->speed == 1000); 731 - if (pd->link.mbit != phydev->speed) { 732 - pd->link.mbit = phydev->speed; 733 - s6gmac_set_rgmii_txclock(pd); 734 - } 735 - pd->link.isup = isup; 736 - if (isup) 737 - netif_carrier_on(dev); 738 - phy_print_status(phydev); 739 - } 740 - 741 - static void s6gmac_adjust_link(struct net_device *dev) 742 - { 743 - struct s6gmac *pd = netdev_priv(dev); 744 - struct phy_device *phydev = pd->phydev; 745 - if (pd->link.isup && 746 - (!phydev->link || 747 - (pd->link.mbit != phydev->speed) || 748 - (pd->link.full != phydev->duplex))) { 749 - pd->link.isup = 0; 750 - netif_tx_disable(dev); 751 - if (!phydev->link) { 752 - netif_carrier_off(dev); 753 - phy_print_status(phydev); 754 - } 755 - } 756 - if (!pd->link.isup && phydev->link) { 757 - if (pd->link.full 
!= phydev->duplex) { 758 - u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); 759 - if (phydev->duplex) 760 - maccfg |= 1 << S6_GMAC_MACCONF2_FULL; 761 - else 762 - maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL); 763 - writel(maccfg, pd->reg + S6_GMAC_MACCONF2); 764 - } 765 - 766 - if (pd->link.giga != (phydev->speed == 1000)) { 767 - u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5); 768 - u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2); 769 - maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK 770 - << S6_GMAC_MACCONF2_IFMODE); 771 - if (phydev->speed == 1000) { 772 - fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM; 773 - maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE 774 - << S6_GMAC_MACCONF2_IFMODE; 775 - } else { 776 - fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM); 777 - maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE 778 - << S6_GMAC_MACCONF2_IFMODE; 779 - } 780 - writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5); 781 - writel(maccfg, pd->reg + S6_GMAC_MACCONF2); 782 - } 783 - 784 - if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) 785 - netif_wake_queue(dev); 786 - s6gmac_linkisup(dev, 1); 787 - } 788 - } 789 - 790 - static inline int s6gmac_phy_start(struct net_device *dev) 791 - { 792 - struct s6gmac *pd = netdev_priv(dev); 793 - int i = 0; 794 - struct phy_device *p = NULL; 795 - while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) 796 - i++; 797 - p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 798 - PHY_INTERFACE_MODE_RGMII); 799 - if (IS_ERR(p)) { 800 - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 801 - return PTR_ERR(p); 802 - } 803 - p->supported &= PHY_GBIT_FEATURES; 804 - p->advertising = p->supported; 805 - pd->phydev = p; 806 - return 0; 807 - } 808 - 809 - static inline void s6gmac_init_stats(struct net_device *dev) 810 - { 811 - struct s6gmac *pd = netdev_priv(dev); 812 - u32 mask; 813 - mask = 1 << S6_GMAC_STATCARRY1_RDRP | 814 - 1 << S6_GMAC_STATCARRY1_RJBR | 815 - 1 << S6_GMAC_STATCARRY1_RFRG | 816 - 1 << S6_GMAC_STATCARRY1_ROVR | 817 - 1 << 
S6_GMAC_STATCARRY1_RUND | 818 - 1 << S6_GMAC_STATCARRY1_RCDE | 819 - 1 << S6_GMAC_STATCARRY1_RFLR | 820 - 1 << S6_GMAC_STATCARRY1_RALN | 821 - 1 << S6_GMAC_STATCARRY1_RMCA | 822 - 1 << S6_GMAC_STATCARRY1_RFCS | 823 - 1 << S6_GMAC_STATCARRY1_RPKT | 824 - 1 << S6_GMAC_STATCARRY1_RBYT; 825 - writel(mask, pd->reg + S6_GMAC_STATCARRY(0)); 826 - writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0)); 827 - mask = 1 << S6_GMAC_STATCARRY2_TDRP | 828 - 1 << S6_GMAC_STATCARRY2_TNCL | 829 - 1 << S6_GMAC_STATCARRY2_TXCL | 830 - 1 << S6_GMAC_STATCARRY2_TEDF | 831 - 1 << S6_GMAC_STATCARRY2_TPKT | 832 - 1 << S6_GMAC_STATCARRY2_TBYT | 833 - 1 << S6_GMAC_STATCARRY2_TFRG | 834 - 1 << S6_GMAC_STATCARRY2_TUND | 835 - 1 << S6_GMAC_STATCARRY2_TOVR | 836 - 1 << S6_GMAC_STATCARRY2_TFCS | 837 - 1 << S6_GMAC_STATCARRY2_TJBR; 838 - writel(mask, pd->reg + S6_GMAC_STATCARRY(1)); 839 - writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1)); 840 - } 841 - 842 - static inline void s6gmac_init_dmac(struct net_device *dev) 843 - { 844 - struct s6gmac *pd = netdev_priv(dev); 845 - s6dmac_disable_chan(pd->tx_dma, pd->tx_chan); 846 - s6dmac_disable_chan(pd->rx_dma, pd->rx_chan); 847 - s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX); 848 - s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX); 849 - } 850 - 851 - static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev) 852 - { 853 - struct s6gmac *pd = netdev_priv(dev); 854 - unsigned long flags; 855 - 856 - spin_lock_irqsave(&pd->lock, flags); 857 - writel(skb->len << S6_GMAC_BURST_PREWR_LEN | 858 - 0 << S6_GMAC_BURST_PREWR_CFE | 859 - 1 << S6_GMAC_BURST_PREWR_PPE | 860 - 1 << S6_GMAC_BURST_PREWR_FCS | 861 - ((skb->len < ETH_ZLEN) ? 
1 : 0) << S6_GMAC_BURST_PREWR_PAD, 862 - pd->reg + S6_GMAC_BURST_PREWR); 863 - s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan, 864 - (u32)skb->data, pd->io, skb->len); 865 - if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan)) 866 - netif_stop_queue(dev); 867 - if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) { 868 - printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n", 869 - pd->tx_skb_o, pd->tx_skb_i); 870 - BUG(); 871 - } 872 - pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb; 873 - spin_unlock_irqrestore(&pd->lock, flags); 874 - return 0; 875 - } 876 - 877 - static void s6gmac_tx_timeout(struct net_device *dev) 878 - { 879 - struct s6gmac *pd = netdev_priv(dev); 880 - unsigned long flags; 881 - spin_lock_irqsave(&pd->lock, flags); 882 - s6gmac_tx_interrupt(dev); 883 - spin_unlock_irqrestore(&pd->lock, flags); 884 - } 885 - 886 - static int s6gmac_open(struct net_device *dev) 887 - { 888 - struct s6gmac *pd = netdev_priv(dev); 889 - unsigned long flags; 890 - phy_read_status(pd->phydev); 891 - spin_lock_irqsave(&pd->lock, flags); 892 - pd->link.mbit = 0; 893 - s6gmac_linkisup(dev, pd->phydev->link); 894 - s6gmac_init_device(dev); 895 - s6gmac_init_stats(dev); 896 - s6gmac_init_dmac(dev); 897 - s6gmac_rx_fillfifo(dev); 898 - s6dmac_enable_chan(pd->rx_dma, pd->rx_chan, 899 - 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1); 900 - s6dmac_enable_chan(pd->tx_dma, pd->tx_chan, 901 - 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1); 902 - writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER | 903 - 0 << S6_GMAC_HOST_INT_TXPREWOVER | 904 - 0 << S6_GMAC_HOST_INT_RXBURSTUNDER | 905 - 0 << S6_GMAC_HOST_INT_RXPOSTRFULL | 906 - 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER, 907 - pd->reg + S6_GMAC_HOST_INTMASK); 908 - spin_unlock_irqrestore(&pd->lock, flags); 909 - phy_start(pd->phydev); 910 - netif_start_queue(dev); 911 - return 0; 912 - } 913 - 914 - static int s6gmac_stop(struct net_device *dev) 915 - { 916 - struct s6gmac *pd = netdev_priv(dev); 917 - unsigned long flags; 918 - netif_stop_queue(dev); 919 - 
phy_stop(pd->phydev); 920 - spin_lock_irqsave(&pd->lock, flags); 921 - s6gmac_init_dmac(dev); 922 - s6gmac_stop_device(dev); 923 - while (pd->tx_skb_i != pd->tx_skb_o) 924 - dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]); 925 - while (pd->rx_skb_i != pd->rx_skb_o) 926 - dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]); 927 - spin_unlock_irqrestore(&pd->lock, flags); 928 - return 0; 929 - } 930 - 931 - static struct net_device_stats *s6gmac_stats(struct net_device *dev) 932 - { 933 - struct s6gmac *pd = netdev_priv(dev); 934 - struct net_device_stats *st = (struct net_device_stats *)&pd->stats; 935 - int i; 936 - do { 937 - unsigned long flags; 938 - spin_lock_irqsave(&pd->lock, flags); 939 - for (i = 0; i < ARRAY_SIZE(pd->stats); i++) 940 - pd->stats[i] = 941 - pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); 942 - s6gmac_stats_collect(pd, &statinf[0][0]); 943 - s6gmac_stats_collect(pd, &statinf[1][0]); 944 - i = s6gmac_stats_pending(pd, 0) | 945 - s6gmac_stats_pending(pd, 1); 946 - spin_unlock_irqrestore(&pd->lock, flags); 947 - } while (i); 948 - st->rx_errors = st->rx_crc_errors + 949 - st->rx_frame_errors + 950 - st->rx_length_errors + 951 - st->rx_missed_errors; 952 - st->tx_errors += st->tx_aborted_errors; 953 - return st; 954 - } 955 - 956 - static int s6gmac_probe(struct platform_device *pdev) 957 - { 958 - struct net_device *dev; 959 - struct s6gmac *pd; 960 - int res; 961 - unsigned long i; 962 - struct mii_bus *mb; 963 - 964 - dev = alloc_etherdev(sizeof(*pd)); 965 - if (!dev) 966 - return -ENOMEM; 967 - 968 - dev->open = s6gmac_open; 969 - dev->stop = s6gmac_stop; 970 - dev->hard_start_xmit = s6gmac_tx; 971 - dev->tx_timeout = s6gmac_tx_timeout; 972 - dev->watchdog_timeo = HZ; 973 - dev->get_stats = s6gmac_stats; 974 - dev->irq = platform_get_irq(pdev, 0); 975 - pd = netdev_priv(dev); 976 - memset(pd, 0, sizeof(*pd)); 977 - spin_lock_init(&pd->lock); 978 - pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start; 979 - i = 
platform_get_resource(pdev, IORESOURCE_DMA, 0)->start; 980 - pd->tx_dma = DMA_MASK_DMAC(i); 981 - pd->tx_chan = DMA_INDEX_CHNL(i); 982 - i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start; 983 - pd->rx_dma = DMA_MASK_DMAC(i); 984 - pd->rx_chan = DMA_INDEX_CHNL(i); 985 - pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; 986 - res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev); 987 - if (res) { 988 - printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq); 989 - goto errirq; 990 - } 991 - res = register_netdev(dev); 992 - if (res) { 993 - printk(KERN_ERR DRV_PRMT "error registering device %s\n", 994 - dev->name); 995 - goto errdev; 996 - } 997 - mb = mdiobus_alloc(); 998 - if (!mb) { 999 - printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); 1000 - res = -ENOMEM; 1001 - goto errmii; 1002 - } 1003 - mb->name = "s6gmac_mii"; 1004 - mb->read = s6mii_read; 1005 - mb->write = s6mii_write; 1006 - mb->reset = s6mii_reset; 1007 - mb->priv = pd; 1008 - snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); 1009 - mb->phy_mask = ~(1 << 0); 1010 - mb->irq = &pd->mii.irq[0]; 1011 - for (i = 0; i < PHY_MAX_ADDR; i++) { 1012 - int n = platform_get_irq(pdev, i + 1); 1013 - if (n < 0) 1014 - n = PHY_POLL; 1015 - pd->mii.irq[i] = n; 1016 - } 1017 - mdiobus_register(mb); 1018 - pd->mii.bus = mb; 1019 - res = s6gmac_phy_start(dev); 1020 - if (res) 1021 - return res; 1022 - platform_set_drvdata(pdev, dev); 1023 - return 0; 1024 - errmii: 1025 - unregister_netdev(dev); 1026 - errdev: 1027 - free_irq(dev->irq, dev); 1028 - errirq: 1029 - free_netdev(dev); 1030 - return res; 1031 - } 1032 - 1033 - static int s6gmac_remove(struct platform_device *pdev) 1034 - { 1035 - struct net_device *dev = platform_get_drvdata(pdev); 1036 - if (dev) { 1037 - struct s6gmac *pd = netdev_priv(dev); 1038 - mdiobus_unregister(pd->mii.bus); 1039 - unregister_netdev(dev); 1040 - free_irq(dev->irq, dev); 1041 - free_netdev(dev); 1042 - } 1043 - return 0; 
1044 - } 1045 - 1046 - static struct platform_driver s6gmac_driver = { 1047 - .probe = s6gmac_probe, 1048 - .remove = s6gmac_remove, 1049 - .driver = { 1050 - .name = "s6gmac", 1051 - }, 1052 - }; 1053 - 1054 - module_platform_driver(s6gmac_driver); 1055 - 1056 - MODULE_LICENSE("GPL"); 1057 - MODULE_DESCRIPTION("S6105 on chip Ethernet driver"); 1058 - MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
+8 -6
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1671 1671 * 0 on success and an appropriate (-)ve integer as defined in errno.h 1672 1672 * file on failure. 1673 1673 */ 1674 - static int stmmac_hw_setup(struct net_device *dev) 1674 + static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 1675 1675 { 1676 1676 struct stmmac_priv *priv = netdev_priv(dev); 1677 1677 int ret; ··· 1708 1708 1709 1709 stmmac_mmc_setup(priv); 1710 1710 1711 - ret = stmmac_init_ptp(priv); 1712 - if (ret && ret != -EOPNOTSUPP) 1713 - pr_warn("%s: failed PTP initialisation\n", __func__); 1711 + if (init_ptp) { 1712 + ret = stmmac_init_ptp(priv); 1713 + if (ret && ret != -EOPNOTSUPP) 1714 + pr_warn("%s: failed PTP initialisation\n", __func__); 1715 + } 1714 1716 1715 1717 #ifdef CONFIG_DEBUG_FS 1716 1718 ret = stmmac_init_fs(dev); ··· 1789 1787 goto init_error; 1790 1788 } 1791 1789 1792 - ret = stmmac_hw_setup(dev); 1790 + ret = stmmac_hw_setup(dev, true); 1793 1791 if (ret < 0) { 1794 1792 pr_err("%s: Hw setup failed\n", __func__); 1795 1793 goto init_error; ··· 3038 3036 netif_device_attach(ndev); 3039 3037 3040 3038 init_dma_desc_rings(ndev, GFP_ATOMIC); 3041 - stmmac_hw_setup(ndev); 3039 + stmmac_hw_setup(ndev, false); 3042 3040 stmmac_init_tx_coalesce(priv); 3043 3041 3044 3042 napi_enable(&priv->napi);
-1
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 430 430 .remove = stmmac_pltfr_remove, 431 431 .driver = { 432 432 .name = STMMAC_RESOURCE_NAME, 433 - .owner = THIS_MODULE, 434 433 .pm = &stmmac_pltfr_pm_ops, 435 434 .of_match_table = of_match_ptr(stmmac_dt_ids), 436 435 },
+1
drivers/net/ethernet/sun/sunvnet.c
··· 1201 1201 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 1202 1202 if (IS_ERR(segs)) { 1203 1203 dev->stats.tx_dropped++; 1204 + dev_kfree_skb_any(skb); 1204 1205 return NETDEV_TX_OK; 1205 1206 } 1206 1207
+17 -13
drivers/net/ethernet/ti/cpsw.c
··· 610 610 611 611 /* Clear all mcast from ALE */ 612 612 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << 613 - priv->host_port); 613 + priv->host_port, -1); 614 614 615 615 /* Flood All Unicast Packets to Host port */ 616 616 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); ··· 634 634 static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 635 635 { 636 636 struct cpsw_priv *priv = netdev_priv(ndev); 637 + int vid; 638 + 639 + if (priv->data.dual_emac) 640 + vid = priv->slaves[priv->emac_port].port_vlan; 641 + else 642 + vid = priv->data.default_vlan; 637 643 638 644 if (ndev->flags & IFF_PROMISC) { 639 645 /* Enable promiscuous mode */ ··· 655 649 cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); 656 650 657 651 /* Clear all mcast from ALE */ 658 - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); 652 + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, 653 + vid); 659 654 660 655 if (!netdev_mc_empty(ndev)) { 661 656 struct netdev_hw_addr *ha; ··· 764 757 static irqreturn_t cpsw_interrupt(int irq, void *dev_id) 765 758 { 766 759 struct cpsw_priv *priv = dev_id; 760 + int value = irq - priv->irqs_table[0]; 761 + 762 + /* NOTICE: Ending IRQ here. The trick with the 'value' variable above 763 + * is to make sure we will always write the correct value to the EOI 764 + * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 765 + * for TX Interrupt and 3 for MISC Interrupt. 
766 + */ 767 + cpdma_ctlr_eoi(priv->dma, value); 767 768 768 769 cpsw_intr_disable(priv); 769 770 if (priv->irq_enabled == true) { ··· 801 786 int num_tx, num_rx; 802 787 803 788 num_tx = cpdma_chan_process(priv->txch, 128); 804 - if (num_tx) 805 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 806 789 807 790 num_rx = cpdma_chan_process(priv->rxch, budget); 808 791 if (num_rx < budget) { ··· 808 795 809 796 napi_complete(napi); 810 797 cpsw_intr_enable(priv); 811 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 812 798 prim_cpsw = cpsw_get_slave_priv(priv, 0); 813 799 if (prim_cpsw->irq_enabled == false) { 814 800 prim_cpsw->irq_enabled = true; ··· 1322 1310 napi_enable(&priv->napi); 1323 1311 cpdma_ctlr_start(priv->dma); 1324 1312 cpsw_intr_enable(priv); 1325 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1326 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1327 1313 1328 1314 prim_cpsw = cpsw_get_slave_priv(priv, 0); 1329 1315 if (prim_cpsw->irq_enabled == false) { ··· 1588 1578 cpdma_chan_start(priv->txch); 1589 1579 cpdma_ctlr_int_ctrl(priv->dma, true); 1590 1580 cpsw_intr_enable(priv); 1591 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1592 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1593 - 1594 1581 } 1595 1582 1596 1583 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) ··· 1627 1620 cpsw_interrupt(ndev->irq, priv); 1628 1621 cpdma_ctlr_int_ctrl(priv->dma, true); 1629 1622 cpsw_intr_enable(priv); 1630 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); 1631 - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); 1632 - 1633 1623 } 1634 1624 #endif 1635 1625
+9 -1
drivers/net/ethernet/ti/cpsw_ale.c
··· 234 234 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); 235 235 } 236 236 237 - int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) 237 + int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) 238 238 { 239 239 u32 ale_entry[ALE_ENTRY_WORDS]; 240 240 int ret, idx; ··· 243 243 cpsw_ale_read(ale, idx, ale_entry); 244 244 ret = cpsw_ale_get_entry_type(ale_entry); 245 245 if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) 246 + continue; 247 + 248 + /* if vid passed is -1 then remove all multicast entry from 249 + * the table irrespective of vlan id, if a valid vlan id is 250 + * passed then remove only multicast added to that vlan id. 251 + * if vlan id doesn't match then move on to next entry. 252 + */ 253 + if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid) 246 254 continue; 247 255 248 256 if (cpsw_ale_get_mcast(ale_entry)) {
+1 -1
drivers/net/ethernet/ti/cpsw_ale.h
··· 92 92 93 93 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); 94 94 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); 95 - int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); 95 + int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); 96 96 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, 97 97 int flags, u16 vid); 98 98 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+2
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 1043 1043 lp->regs = of_iomap(op->dev.of_node, 0); 1044 1044 if (!lp->regs) { 1045 1045 dev_err(&op->dev, "could not map temac regs.\n"); 1046 + rc = -ENOMEM; 1046 1047 goto nodev; 1047 1048 } 1048 1049 ··· 1063 1062 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); 1064 1063 if (!np) { 1065 1064 dev_err(&op->dev, "could not find DMA node\n"); 1065 + rc = -ENODEV; 1066 1066 goto err_iounmap; 1067 1067 } 1068 1068
-2
drivers/net/ethernet/xilinx/xilinx_axienet.h
··· 388 388 * @dma_err_tasklet: Tasklet structure to process Axi DMA errors 389 389 * @tx_irq: Axidma TX IRQ number 390 390 * @rx_irq: Axidma RX IRQ number 391 - * @temac_type: axienet type to identify between soft and hard temac 392 391 * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X 393 392 * @options: AxiEthernet option word 394 393 * @last_link: Phy link state in which the PHY was negotiated earlier ··· 430 431 431 432 int tx_irq; 432 433 int rx_irq; 433 - u32 temac_type; 434 434 u32 phy_type; 435 435 436 436 u32 options; /* Current options word */
+2 -4
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1501 1501 lp->regs = of_iomap(op->dev.of_node, 0); 1502 1502 if (!lp->regs) { 1503 1503 dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); 1504 + ret = -ENOMEM; 1504 1505 goto nodev; 1505 1506 } 1506 1507 /* Setup checksum offload, but default to off if not specified */ ··· 1556 1555 if ((be32_to_cpup(p)) >= 0x4000) 1557 1556 lp->jumbo_support = 1; 1558 1557 } 1559 - p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type", 1560 - NULL); 1561 - if (p) 1562 - lp->temac_type = be32_to_cpup(p); 1563 1558 p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); 1564 1559 if (p) 1565 1560 lp->phy_type = be32_to_cpup(p); ··· 1564 1567 np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); 1565 1568 if (!np) { 1566 1569 dev_err(&op->dev, "could not find DMA node\n"); 1570 + ret = -ENODEV; 1567 1571 goto err_iounmap; 1568 1572 } 1569 1573 lp->dma_regs = of_iomap(np, 0);
+1
drivers/net/ethernet/xilinx/xilinx_emaclite.c
··· 1109 1109 res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); 1110 1110 if (!res) { 1111 1111 dev_err(dev, "no IRQ found\n"); 1112 + rc = -ENXIO; 1112 1113 goto error; 1113 1114 } 1114 1115
+1
drivers/net/hyperv/hyperv_net.h
··· 590 590 591 591 592 592 #define NETVSC_RECEIVE_BUFFER_ID 0xcafe 593 + #define NETVSC_SEND_BUFFER_ID 0 593 594 594 595 #define NETVSC_PACKET_SIZE 4096 595 596
+8 -7
drivers/net/hyperv/netvsc.c
··· 161 161 162 162 /* Deal with the send buffer we may have setup. 163 163 * If we got a send section size, it means we received a 164 - * SendsendBufferComplete msg (ie sent 165 - * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need 164 + * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent 165 + * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need 166 166 * to send a revoke msg here 167 167 */ 168 168 if (net_device->send_section_size) { ··· 172 172 173 173 revoke_packet->hdr.msg_type = 174 174 NVSP_MSG1_TYPE_REVOKE_SEND_BUF; 175 - revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0; 175 + revoke_packet->msg.v1_msg.revoke_send_buf.id = 176 + NETVSC_SEND_BUFFER_ID; 176 177 177 178 ret = vmbus_sendpacket(net_device->dev->channel, 178 179 revoke_packet, ··· 205 204 net_device->send_buf_gpadl_handle = 0; 206 205 } 207 206 if (net_device->send_buf) { 208 - /* Free up the receive buffer */ 207 + /* Free up the send buffer */ 209 208 vfree(net_device->send_buf); 210 209 net_device->send_buf = NULL; 211 210 } ··· 340 339 init_packet = &net_device->channel_init_pkt; 341 340 memset(init_packet, 0, sizeof(struct nvsp_message)); 342 341 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF; 343 - init_packet->msg.v1_msg.send_recv_buf.gpadl_handle = 342 + init_packet->msg.v1_msg.send_send_buf.gpadl_handle = 344 343 net_device->send_buf_gpadl_handle; 345 - init_packet->msg.v1_msg.send_recv_buf.id = 0; 344 + init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID; 346 345 347 346 /* Send the gpadl notification request */ 348 347 ret = vmbus_sendpacket(device->channel, init_packet, ··· 365 364 netdev_err(ndev, "Unable to complete send buffer " 366 365 "initialization with NetVsp - status %d\n", 367 366 init_packet->msg.v1_msg. 368 - send_recv_buf_complete.status); 367 + send_send_buf_complete.status); 369 368 ret = -EINVAL; 370 369 goto cleanup; 371 370 }
+3 -15
drivers/net/phy/micrel.c
··· 88 88 89 89 static const struct kszphy_type ksz8021_type = { 90 90 .led_mode_reg = MII_KSZPHY_CTRL_2, 91 + .has_broadcast_disable = true, 91 92 .has_rmii_ref_clk_sel = true, 92 93 }; 93 94 ··· 257 256 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 258 257 259 258 return 0; 260 - } 261 - 262 - static int ksz8021_config_init(struct phy_device *phydev) 263 - { 264 - int rc; 265 - 266 - rc = kszphy_config_init(phydev); 267 - if (rc) 268 - return rc; 269 - 270 - rc = kszphy_broadcast_disable(phydev); 271 - 272 - return rc < 0 ? rc : 0; 273 259 } 274 260 275 261 static int ksz9021_load_values_from_of(struct phy_device *phydev, ··· 572 584 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 573 585 .driver_data = &ksz8021_type, 574 586 .probe = kszphy_probe, 575 - .config_init = ksz8021_config_init, 587 + .config_init = kszphy_config_init, 576 588 .config_aneg = genphy_config_aneg, 577 589 .read_status = genphy_read_status, 578 590 .ack_interrupt = kszphy_ack_interrupt, ··· 589 601 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 590 602 .driver_data = &ksz8021_type, 591 603 .probe = kszphy_probe, 592 - .config_init = ksz8021_config_init, 604 + .config_init = kszphy_config_init, 593 605 .config_aneg = genphy_config_aneg, 594 606 .read_status = genphy_read_status, 595 607 .ack_interrupt = kszphy_ack_interrupt,
+14 -2
drivers/net/team/team.c
··· 629 629 static void team_notify_peers_work(struct work_struct *work) 630 630 { 631 631 struct team *team; 632 + int val; 632 633 633 634 team = container_of(work, struct team, notify_peers.dw.work); 634 635 ··· 637 636 schedule_delayed_work(&team->notify_peers.dw, 0); 638 637 return; 639 638 } 639 + val = atomic_dec_if_positive(&team->notify_peers.count_pending); 640 + if (val < 0) { 641 + rtnl_unlock(); 642 + return; 643 + } 640 644 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); 641 645 rtnl_unlock(); 642 - if (!atomic_dec_and_test(&team->notify_peers.count_pending)) 646 + if (val) 643 647 schedule_delayed_work(&team->notify_peers.dw, 644 648 msecs_to_jiffies(team->notify_peers.interval)); 645 649 } ··· 675 669 static void team_mcast_rejoin_work(struct work_struct *work) 676 670 { 677 671 struct team *team; 672 + int val; 678 673 679 674 team = container_of(work, struct team, mcast_rejoin.dw.work); 680 675 ··· 683 676 schedule_delayed_work(&team->mcast_rejoin.dw, 0); 684 677 return; 685 678 } 679 + val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); 680 + if (val < 0) { 681 + rtnl_unlock(); 682 + return; 683 + } 686 684 call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); 687 685 rtnl_unlock(); 688 - if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) 686 + if (val) 689 687 schedule_delayed_work(&team->mcast_rejoin.dw, 690 688 msecs_to_jiffies(team->mcast_rejoin.interval)); 691 689 }
+1 -1
drivers/net/usb/kaweth.c
··· 1276 1276 awd.done = 0; 1277 1277 1278 1278 urb->context = &awd; 1279 - status = usb_submit_urb(urb, GFP_NOIO); 1279 + status = usb_submit_urb(urb, GFP_ATOMIC); 1280 1280 if (status) { 1281 1281 // something went wrong 1282 1282 usb_free_urb(urb);
+7 -3
drivers/net/usb/qmi_wwan.c
··· 56 56 /* default ethernet address used by the modem */ 57 57 static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; 58 58 59 + static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00}; 60 + 59 61 /* Make up an ethernet header if the packet doesn't have one. 60 62 * 61 63 * A firmware bug common among several devices cause them to send raw ··· 334 332 usb_driver_release_interface(driver, info->data); 335 333 } 336 334 337 - /* Never use the same address on both ends of the link, even 338 - * if the buggy firmware told us to. 335 + /* Never use the same address on both ends of the link, even if the 336 + * buggy firmware told us to. Or, if device is assigned the well-known 337 + * buggy firmware MAC address, replace it with a random address, 339 338 */ 340 - if (ether_addr_equal(dev->net->dev_addr, default_modem_addr)) 339 + if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) || 340 + ether_addr_equal(dev->net->dev_addr, buggy_fw_addr)) 341 341 eth_hw_addr_random(dev->net); 342 342 343 343 /* make MAC addr easily distinguishable from an IP header */
+17
drivers/net/usb/r8152.c
··· 1897 1897 netif_wake_queue(netdev); 1898 1898 } 1899 1899 1900 + static netdev_features_t 1901 + rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, 1902 + netdev_features_t features) 1903 + { 1904 + u32 mss = skb_shinfo(skb)->gso_size; 1905 + int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX; 1906 + int offset = skb_transport_offset(skb); 1907 + 1908 + if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) 1909 + features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); 1910 + else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) 1911 + features &= ~NETIF_F_GSO_MASK; 1912 + 1913 + return features; 1914 + } 1915 + 1900 1916 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, 1901 1917 struct net_device *netdev) 1902 1918 { ··· 3722 3706 .ndo_set_mac_address = rtl8152_set_mac_address, 3723 3707 .ndo_change_mtu = rtl8152_change_mtu, 3724 3708 .ndo_validate_addr = eth_validate_addr, 3709 + .ndo_features_check = rtl8152_features_check, 3725 3710 }; 3726 3711 3727 3712 static void r8152b_get_version(struct r8152 *tp)
-2
drivers/net/virtio_net.c
··· 760 760 container_of(napi, struct receive_queue, napi); 761 761 unsigned int r, received = 0; 762 762 763 - again: 764 763 received += virtnet_receive(rq, budget - received); 765 764 766 765 /* Out of packets? */ ··· 770 771 napi_schedule_prep(napi)) { 771 772 virtqueue_disable_cb(rq->vq); 772 773 __napi_schedule(napi); 773 - goto again; 774 774 } 775 775 } 776 776
+24 -10
drivers/net/vxlan.c
··· 1579 1579 bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk); 1580 1580 1581 1581 skb = udp_tunnel_handle_offloads(skb, udp_sum); 1582 - if (IS_ERR(skb)) 1583 - return -EINVAL; 1582 + if (IS_ERR(skb)) { 1583 + err = -EINVAL; 1584 + goto err; 1585 + } 1584 1586 1585 1587 skb_scrub_packet(skb, xnet); 1586 1588 ··· 1592 1590 1593 1591 /* Need space for new headers (invalidates iph ptr) */ 1594 1592 err = skb_cow_head(skb, min_headroom); 1595 - if (unlikely(err)) 1596 - return err; 1593 + if (unlikely(err)) { 1594 + kfree_skb(skb); 1595 + goto err; 1596 + } 1597 1597 1598 1598 skb = vlan_hwaccel_push_inside(skb); 1599 - if (WARN_ON(!skb)) 1600 - return -ENOMEM; 1599 + if (WARN_ON(!skb)) { 1600 + err = -ENOMEM; 1601 + goto err; 1602 + } 1601 1603 1602 1604 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1603 1605 vxh->vx_flags = htonl(VXLAN_FLAGS); ··· 1612 1606 udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio, 1613 1607 ttl, src_port, dst_port); 1614 1608 return 0; 1609 + err: 1610 + dst_release(dst); 1611 + return err; 1615 1612 } 1616 1613 #endif 1617 1614 ··· 1630 1621 1631 1622 skb = udp_tunnel_handle_offloads(skb, udp_sum); 1632 1623 if (IS_ERR(skb)) 1633 - return -EINVAL; 1624 + return PTR_ERR(skb); 1634 1625 1635 1626 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 1636 1627 + VXLAN_HLEN + sizeof(struct iphdr) ··· 1638 1629 1639 1630 /* Need space for new headers (invalidates iph ptr) */ 1640 1631 err = skb_cow_head(skb, min_headroom); 1641 - if (unlikely(err)) 1632 + if (unlikely(err)) { 1633 + kfree_skb(skb); 1642 1634 return err; 1635 + } 1643 1636 1644 1637 skb = vlan_hwaccel_push_inside(skb); 1645 1638 if (WARN_ON(!skb)) ··· 1787 1776 tos, ttl, df, src_port, dst_port, 1788 1777 htonl(vni << 8), 1789 1778 !net_eq(vxlan->net, dev_net(vxlan->dev))); 1790 - 1791 - if (err < 0) 1779 + if (err < 0) { 1780 + /* skb is already freed. 
*/ 1781 + skb = NULL; 1792 1782 goto rt_tx_error; 1783 + } 1784 + 1793 1785 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 1794 1786 #if IS_ENABLED(CONFIG_IPV6) 1795 1787 } else {
+2 -2
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
··· 1070 1070 */ 1071 1071 if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) && 1072 1072 ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) || 1073 - (sdiodev->pdata->oob_irq_supported))) 1073 + (sdiodev->pdata && sdiodev->pdata->oob_irq_supported))) 1074 1074 bus_if->wowl_supported = true; 1075 1075 #endif 1076 1076 ··· 1167 1167 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 1168 1168 1169 1169 brcmf_dbg(SDIO, "Enter\n"); 1170 - if (sdiodev->pdata->oob_irq_supported) 1170 + if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported) 1171 1171 disable_irq_wake(sdiodev->pdata->oob_irq_nr); 1172 1172 brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS); 1173 1173 atomic_set(&sdiodev->suspend, false);
+2 -1
drivers/net/wireless/ipw2x00/Kconfig
··· 65 65 66 66 config IPW2200 67 67 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" 68 - depends on PCI && CFG80211 && CFG80211_WEXT 68 + depends on PCI && CFG80211 69 + select CFG80211_WEXT 69 70 select WIRELESS_EXT 70 71 select WEXT_SPY 71 72 select WEXT_PRIV
+3 -3
drivers/net/wireless/iwlwifi/iwl-7000.c
··· 69 69 #include "iwl-agn-hw.h" 70 70 71 71 /* Highest firmware API version supported */ 72 - #define IWL7260_UCODE_API_MAX 10 73 - #define IWL3160_UCODE_API_MAX 10 72 + #define IWL7260_UCODE_API_MAX 12 73 + #define IWL3160_UCODE_API_MAX 12 74 74 75 75 /* Oldest version we won't warn about */ 76 76 #define IWL7260_UCODE_API_OK 10 ··· 105 105 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" 106 106 107 107 #define IWL7265D_FW_PRE "iwlwifi-7265D-" 108 - #define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" 108 + #define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" 109 109 110 110 #define NVM_HW_SECTION_NUM_FAMILY_7000 0 111 111
+1 -1
drivers/net/wireless/iwlwifi/iwl-8000.c
··· 69 69 #include "iwl-agn-hw.h" 70 70 71 71 /* Highest firmware API version supported */ 72 - #define IWL8000_UCODE_API_MAX 10 72 + #define IWL8000_UCODE_API_MAX 12 73 73 74 74 /* Oldest version we won't warn about */ 75 75 #define IWL8000_UCODE_API_OK 10
+1 -1
drivers/net/wireless/iwlwifi/iwl-drv.c
··· 1323 1323 1324 1324 try_again: 1325 1325 /* try next, if any */ 1326 - kfree(pieces); 1327 1326 release_firmware(ucode_raw); 1328 1327 if (iwl_request_firmware(drv, false)) 1329 1328 goto out_unbind; 1329 + kfree(pieces); 1330 1330 return; 1331 1331 1332 1332 out_free_fw:
+1
drivers/net/wireless/iwlwifi/iwl-fh.h
··· 310 310 #define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) 311 311 312 312 #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 313 + #define FH_MEM_TB_MAX_LENGTH (0x00020000) 313 314 314 315 /* TFDB Area - TFDs buffer table */ 315 316 #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+4
drivers/net/wireless/iwlwifi/iwl-fw-file.h
··· 243 243 * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. 244 244 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time 245 245 * longer than the passive one, which is essential for fragmented scan. 246 + * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, 247 + * regardless of the band or the number of the probes. FW will calculate 248 + * the actual dwell time. 246 249 */ 247 250 enum iwl_ucode_tlv_api { 248 251 IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), ··· 256 253 IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), 257 254 IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), 258 255 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), 256 + IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), 259 257 }; 260 258 261 259 /**
+2
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
··· 672 672 * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented 673 673 * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report 674 674 * and DS parameter set IEs into probe requests. 675 + * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches 675 676 */ 676 677 enum iwl_mvm_lmac_scan_flags { 677 678 IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), ··· 682 681 IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), 683 682 IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), 684 683 IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), 684 + IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), 685 685 }; 686 686 687 687 enum iwl_scan_priority {
+13 -2
drivers/net/wireless/iwlwifi/mvm/mac80211.c
··· 1004 1004 { 1005 1005 lockdep_assert_held(&mvm->mutex); 1006 1006 1007 - /* disallow low power states when the FW is down */ 1008 - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1007 + /* 1008 + * Disallow low power states when the FW is down by taking 1009 + * the UCODE_DOWN ref. in case of ongoing hw restart the 1010 + * ref is already taken, so don't take it again. 1011 + */ 1012 + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1013 + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1009 1014 1010 1015 /* async_handlers_wk is now blocked */ 1011 1016 ··· 1027 1022 1028 1023 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1029 1024 iwl_mvm_del_aux_sta(mvm); 1025 + 1026 + /* 1027 + * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() 1028 + * won't be called in this case). 1029 + */ 1030 + clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1030 1031 1031 1032 mvm->ucode_loaded = false; 1032 1033 }
+14 -5
drivers/net/wireless/iwlwifi/mvm/scan.c
··· 171 171 * already included in the probe template, so we need to set only 172 172 * req->n_ssids - 1 bits in addition to the first bit. 173 173 */ 174 - static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) 174 + static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, 175 + enum ieee80211_band band, int n_ssids) 175 176 { 177 + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) 178 + return 10; 176 179 if (band == IEEE80211_BAND_2GHZ) 177 180 return 20 + 3 * (n_ssids + 1); 178 181 return 10 + 2 * (n_ssids + 1); 179 182 } 180 183 181 - static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) 184 + static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, 185 + enum ieee80211_band band) 182 186 { 187 + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) 188 + return 110; 183 189 return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; 184 190 } 185 191 ··· 337 331 */ 338 332 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 339 333 u32 passive_dwell = 340 - iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); 334 + iwl_mvm_get_passive_dwell(mvm, 335 + IEEE80211_BAND_2GHZ); 341 336 params->max_out_time = passive_dwell; 342 337 } else { 343 338 params->passive_fragmented = true; ··· 355 348 params->dwell[band].passive = frag_passive_dwell; 356 349 else 357 350 params->dwell[band].passive = 358 - iwl_mvm_get_passive_dwell(band); 359 - params->dwell[band].active = iwl_mvm_get_active_dwell(band, 351 + iwl_mvm_get_passive_dwell(mvm, band); 352 + params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band, 360 353 n_ssids); 361 354 } 362 355 } ··· 1455 1448 1456 1449 if (iwl_mvm_scan_pass_all(mvm, req)) 1457 1450 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; 1451 + else 1452 + flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; 1458 1453 1459 1454 if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) 1460 1455 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+6 -2
drivers/net/wireless/iwlwifi/mvm/tx.c
··· 108 108 tx_flags &= ~TX_CMD_FLG_SEQ_CTL; 109 109 } 110 110 111 - /* tid_tspec will default to 0 = BE when QOS isn't enabled */ 112 - ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; 111 + /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ 112 + if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) 113 + ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; 114 + else 115 + ac = tid_to_mac80211_ac[0]; 116 + 113 117 tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << 114 118 TX_CMD_FLG_BT_PRIO_POS; 115 119
+1 -1
drivers/net/wireless/iwlwifi/mvm/utils.c
··· 665 665 if (num_of_ant(mvm->fw->valid_rx_ant) == 1) 666 666 return false; 667 667 668 - if (!mvm->cfg->rx_with_siso_diversity) 668 + if (mvm->cfg->rx_with_siso_diversity) 669 669 return false; 670 670 671 671 ieee80211_iterate_active_interfaces_atomic(
+7 -1
drivers/net/wireless/iwlwifi/pcie/drv.c
··· 367 367 368 368 /* 3165 Series */ 369 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 370 + {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 371 + {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, 370 372 {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, 373 + {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 374 + {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 371 375 372 376 /* 7265 Series */ 373 377 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, ··· 527 523 else if (cfg == &iwl7265_n_cfg) 528 524 cfg_7265d = &iwl7265d_n_cfg; 529 525 if (cfg_7265d && 530 - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) 526 + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { 531 527 cfg = cfg_7265d; 528 + iwl_trans->cfg = cfg_7265d; 529 + } 532 530 #endif 533 531 534 532 pci_set_drvdata(pdev, iwl_trans);
+11 -6
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 614 614 { 615 615 u8 *v_addr; 616 616 dma_addr_t p_addr; 617 - u32 offset, chunk_sz = section->len; 617 + u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); 618 618 int ret = 0; 619 619 620 620 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", ··· 1012 1012 /* Stop the device, and put it in low power state */ 1013 1013 iwl_pcie_apm_stop(trans); 1014 1014 1015 - /* Upon stop, the APM issues an interrupt if HW RF kill is set. 1016 - * Clean again the interrupt here 1015 + /* stop and reset the on-board processor */ 1016 + iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 1017 + udelay(20); 1018 + 1019 + /* 1020 + * Upon stop, the APM issues an interrupt if HW RF kill is set. 1021 + * This is a bug in certain verions of the hardware. 1022 + * Certain devices also keep sending HW RF kill interrupt all 1023 + * the time, unless the interrupt is ACKed even if the interrupt 1024 + * should be masked. Re-ACK all the interrupts here. 1017 1025 */ 1018 1026 spin_lock(&trans_pcie->irq_lock); 1019 1027 iwl_disable_interrupts(trans); 1020 1028 spin_unlock(&trans_pcie->irq_lock); 1021 1029 1022 - /* stop and reset the on-board processor */ 1023 - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 1024 - udelay(20); 1025 1030 1026 1031 /* clear all status bits */ 1027 1032 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+25 -9
drivers/net/wireless/rtlwifi/pci.c
··· 666 666 } 667 667 668 668 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, 669 - u8 *entry, int rxring_idx, int desc_idx) 669 + struct sk_buff *new_skb, u8 *entry, 670 + int rxring_idx, int desc_idx) 670 671 { 671 672 struct rtl_priv *rtlpriv = rtl_priv(hw); 672 673 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); ··· 675 674 u8 tmp_one = 1; 676 675 struct sk_buff *skb; 677 676 677 + if (likely(new_skb)) { 678 + skb = new_skb; 679 + goto remap; 680 + } 678 681 skb = dev_alloc_skb(rtlpci->rxbuffersize); 679 682 if (!skb) 680 683 return 0; 681 - rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; 682 684 685 + remap: 683 686 /* just set skb->cb to mapping addr for pci_unmap_single use */ 684 687 *((dma_addr_t *)skb->cb) = 685 688 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), ··· 691 686 bufferaddress = *((dma_addr_t *)skb->cb); 692 687 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) 693 688 return 0; 689 + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; 694 690 if (rtlpriv->use_new_trx_flow) { 695 691 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, 696 692 HW_DESC_RX_PREPARE, ··· 787 781 /*rx pkt */ 788 782 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ 789 783 rtlpci->rx_ring[rxring_idx].idx]; 784 + struct sk_buff *new_skb; 790 785 791 786 if (rtlpriv->use_new_trx_flow) { 792 787 rx_remained_cnt = ··· 814 807 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), 815 808 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); 816 809 810 + /* get a new skb - if fail, old one will be reused */ 811 + new_skb = dev_alloc_skb(rtlpci->rxbuffersize); 812 + if (unlikely(!new_skb)) { 813 + pr_err("Allocation of new skb failed in %s\n", 814 + __func__); 815 + goto no_new; 816 + } 817 817 if (rtlpriv->use_new_trx_flow) { 818 818 buffer_desc = 819 819 &rtlpci->rx_ring[rxring_idx].buffer_desc ··· 925 911 schedule_work(&rtlpriv->works.lps_change_work); 926 912 } 927 913 end: 914 + skb = new_skb; 915 + no_new: 928 916 if 
(rtlpriv->use_new_trx_flow) { 929 - _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, 917 + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, 930 918 rxring_idx, 931 - rtlpci->rx_ring[rxring_idx].idx); 932 - } else { 933 - _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx, 934 919 rtlpci->rx_ring[rxring_idx].idx); 935 - 920 + } else { 921 + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, 922 + rxring_idx, 923 + rtlpci->rx_ring[rxring_idx].idx); 936 924 if (rtlpci->rx_ring[rxring_idx].idx == 937 925 rtlpci->rxringcount - 1) 938 926 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, ··· 1323 1307 rtlpci->rx_ring[rxring_idx].idx = 0; 1324 1308 for (i = 0; i < rtlpci->rxringcount; i++) { 1325 1309 entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; 1326 - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, 1310 + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, 1327 1311 rxring_idx, i)) 1328 1312 return -ENOMEM; 1329 1313 } ··· 1348 1332 1349 1333 for (i = 0; i < rtlpci->rxringcount; i++) { 1350 1334 entry = &rtlpci->rx_ring[rxring_idx].desc[i]; 1351 - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, 1335 + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, 1352 1336 rxring_idx, i)) 1353 1337 return -ENOMEM; 1354 1338 }
+1
drivers/net/xen-netback/xenbus.c
··· 737 737 } 738 738 739 739 queue->remaining_credit = credit_bytes; 740 + queue->credit_usec = credit_usec; 740 741 741 742 err = connect_rings(be, queue); 742 743 if (err) {
+42 -29
drivers/net/xen-netfront.c
··· 88 88 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 89 89 90 90 struct netfront_stats { 91 - u64 rx_packets; 92 - u64 tx_packets; 93 - u64 rx_bytes; 94 - u64 tx_bytes; 91 + u64 packets; 92 + u64 bytes; 95 93 struct u64_stats_sync syncp; 96 94 }; 97 95 ··· 158 160 struct netfront_queue *queues; 159 161 160 162 /* Statistics */ 161 - struct netfront_stats __percpu *stats; 163 + struct netfront_stats __percpu *rx_stats; 164 + struct netfront_stats __percpu *tx_stats; 162 165 163 166 atomic_t rx_gso_checksum_fixup; 164 167 }; ··· 564 565 { 565 566 unsigned short id; 566 567 struct netfront_info *np = netdev_priv(dev); 567 - struct netfront_stats *stats = this_cpu_ptr(np->stats); 568 + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); 568 569 struct xen_netif_tx_request *tx; 569 570 char *data = skb->data; 570 571 RING_IDX i; ··· 671 672 if (notify) 672 673 notify_remote_via_irq(queue->tx_irq); 673 674 674 - u64_stats_update_begin(&stats->syncp); 675 - stats->tx_bytes += skb->len; 676 - stats->tx_packets++; 677 - u64_stats_update_end(&stats->syncp); 675 + u64_stats_update_begin(&tx_stats->syncp); 676 + tx_stats->bytes += skb->len; 677 + tx_stats->packets++; 678 + u64_stats_update_end(&tx_stats->syncp); 678 679 679 680 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! 
*/ 680 681 xennet_tx_buf_gc(queue); ··· 930 931 static int handle_incoming_queue(struct netfront_queue *queue, 931 932 struct sk_buff_head *rxq) 932 933 { 933 - struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); 934 + struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); 934 935 int packets_dropped = 0; 935 936 struct sk_buff *skb; 936 937 ··· 951 952 continue; 952 953 } 953 954 954 - u64_stats_update_begin(&stats->syncp); 955 - stats->rx_packets++; 956 - stats->rx_bytes += skb->len; 957 - u64_stats_update_end(&stats->syncp); 955 + u64_stats_update_begin(&rx_stats->syncp); 956 + rx_stats->packets++; 957 + rx_stats->bytes += skb->len; 958 + u64_stats_update_end(&rx_stats->syncp); 958 959 959 960 /* Pass it up. */ 960 961 napi_gro_receive(&queue->napi, skb); ··· 1078 1079 int cpu; 1079 1080 1080 1081 for_each_possible_cpu(cpu) { 1081 - struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); 1082 + struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); 1083 + struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); 1082 1084 u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1083 1085 unsigned int start; 1084 1086 1085 1087 do { 1086 - start = u64_stats_fetch_begin_irq(&stats->syncp); 1088 + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); 1089 + tx_packets = tx_stats->packets; 1090 + tx_bytes = tx_stats->bytes; 1091 + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); 1087 1092 1088 - rx_packets = stats->rx_packets; 1089 - tx_packets = stats->tx_packets; 1090 - rx_bytes = stats->rx_bytes; 1091 - tx_bytes = stats->tx_bytes; 1092 - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 1093 + do { 1094 + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); 1095 + rx_packets = rx_stats->packets; 1096 + rx_bytes = rx_stats->bytes; 1097 + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); 1093 1098 1094 1099 tot->rx_packets += rx_packets; 1095 1100 tot->tx_packets += tx_packets; ··· 1278 
1275 #endif 1279 1276 }; 1280 1277 1278 + static void xennet_free_netdev(struct net_device *netdev) 1279 + { 1280 + struct netfront_info *np = netdev_priv(netdev); 1281 + 1282 + free_percpu(np->rx_stats); 1283 + free_percpu(np->tx_stats); 1284 + free_netdev(netdev); 1285 + } 1286 + 1281 1287 static struct net_device *xennet_create_dev(struct xenbus_device *dev) 1282 1288 { 1283 1289 int err; ··· 1307 1295 np->queues = NULL; 1308 1296 1309 1297 err = -ENOMEM; 1310 - np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1311 - if (np->stats == NULL) 1298 + np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1299 + if (np->rx_stats == NULL) 1300 + goto exit; 1301 + np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1302 + if (np->tx_stats == NULL) 1312 1303 goto exit; 1313 1304 1314 1305 netdev->netdev_ops = &xennet_netdev_ops; ··· 1342 1327 return netdev; 1343 1328 1344 1329 exit: 1345 - free_netdev(netdev); 1330 + xennet_free_netdev(netdev); 1346 1331 return ERR_PTR(err); 1347 1332 } 1348 1333 ··· 1384 1369 return 0; 1385 1370 1386 1371 fail: 1387 - free_netdev(netdev); 1372 + xennet_free_netdev(netdev); 1388 1373 dev_set_drvdata(&dev->dev, NULL); 1389 1374 return err; 1390 1375 } ··· 2204 2189 info->queues = NULL; 2205 2190 } 2206 2191 2207 - free_percpu(info->stats); 2208 - 2209 - free_netdev(info->netdev); 2192 + xennet_free_netdev(info->netdev); 2210 2193 2211 2194 return 0; 2212 2195 }
+2 -1
drivers/phy/phy-miphy28lp.c
··· 1050 1050 ret = miphy28lp_init_usb3(miphy_phy); 1051 1051 break; 1052 1052 default: 1053 - return -EINVAL; 1053 + ret = -EINVAL; 1054 + break; 1054 1055 } 1055 1056 1056 1057 mutex_unlock(&miphy_dev->miphy_mutex);
+3 -4
drivers/phy/phy-omap-control.c
··· 29 29 /** 30 30 * omap_control_pcie_pcs - set the PCS delay count 31 31 * @dev: the control module device 32 - * @id: index of the pcie PHY (should be 1 or 2) 33 32 * @delay: 8 bit delay value 34 33 */ 35 - void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) 34 + void omap_control_pcie_pcs(struct device *dev, u8 delay) 36 35 { 37 36 u32 val; 38 37 struct omap_control_phy *control_phy; ··· 54 55 55 56 val = readl(control_phy->pcie_pcs); 56 57 val &= ~(OMAP_CTRL_PCIE_PCS_MASK << 57 - (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT)); 58 - val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT); 58 + OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT); 59 + val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT); 59 60 writel(val, control_phy->pcie_pcs); 60 61 } 61 62 EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
+2 -1
drivers/phy/phy-sun4i-usb.c
··· 244 244 else 245 245 data->num_phys = 3; 246 246 247 - if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy")) 247 + if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") || 248 + of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy")) 248 249 data->disc_thresh = 3; 249 250 else 250 251 data->disc_thresh = 2;
+6 -4
drivers/phy/phy-ti-pipe3.c
··· 82 82 struct clk *refclk; 83 83 struct clk *div_clk; 84 84 struct pipe3_dpll_map *dpll_map; 85 - u8 id; 86 85 }; 87 86 88 87 static struct pipe3_dpll_map dpll_map_usb[] = { ··· 216 217 u32 val; 217 218 int ret = 0; 218 219 220 + /* 221 + * Set pcie_pcs register to 0x96 for proper functioning of phy 222 + * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table 223 + * 18-1804. 224 + */ 219 225 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { 220 - omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1); 226 + omap_control_pcie_pcs(phy->control_dev, 0x96); 221 227 return 0; 222 228 } 223 229 ··· 351 347 } 352 348 353 349 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 354 - if (of_property_read_u8(node, "id", &phy->id) < 0) 355 - phy->id = 1; 356 350 357 351 clk = devm_clk_get(phy->dev, "dpll_ref"); 358 352 if (IS_ERR(clk)) {
+54 -3
drivers/pinctrl/pinctrl-rockchip.c
··· 89 89 * @reg_pull: optional separate register for additional pull settings 90 90 * @clk: clock of the gpio bank 91 91 * @irq: interrupt of the gpio bank 92 + * @saved_enables: Saved content of GPIO_INTEN at suspend time. 92 93 * @pin_base: first pin number 93 94 * @nr_pins: number of pins in this bank 94 95 * @name: name of the bank ··· 108 107 struct regmap *regmap_pull; 109 108 struct clk *clk; 110 109 int irq; 110 + u32 saved_enables; 111 111 u32 pin_base; 112 112 u8 nr_pins; 113 113 char *name; ··· 1545 1543 return 0; 1546 1544 } 1547 1545 1546 + static void rockchip_irq_suspend(struct irq_data *d) 1547 + { 1548 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1549 + struct rockchip_pin_bank *bank = gc->private; 1550 + 1551 + bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN); 1552 + irq_reg_writel(gc, gc->wake_active, GPIO_INTEN); 1553 + } 1554 + 1555 + static void rockchip_irq_resume(struct irq_data *d) 1556 + { 1557 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1558 + struct rockchip_pin_bank *bank = gc->private; 1559 + 1560 + irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN); 1561 + } 1562 + 1563 + static void rockchip_irq_disable(struct irq_data *d) 1564 + { 1565 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1566 + u32 val; 1567 + 1568 + irq_gc_lock(gc); 1569 + 1570 + val = irq_reg_readl(gc, GPIO_INTEN); 1571 + val &= ~d->mask; 1572 + irq_reg_writel(gc, val, GPIO_INTEN); 1573 + 1574 + irq_gc_unlock(gc); 1575 + } 1576 + 1577 + static void rockchip_irq_enable(struct irq_data *d) 1578 + { 1579 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 1580 + u32 val; 1581 + 1582 + irq_gc_lock(gc); 1583 + 1584 + val = irq_reg_readl(gc, GPIO_INTEN); 1585 + val |= d->mask; 1586 + irq_reg_writel(gc, val, GPIO_INTEN); 1587 + 1588 + irq_gc_unlock(gc); 1589 + } 1590 + 1548 1591 static int rockchip_interrupts_register(struct platform_device *pdev, 1549 1592 struct rockchip_pinctrl *info) 1550 1593 { ··· 1628 
1581 gc = irq_get_domain_generic_chip(bank->domain, 0); 1629 1582 gc->reg_base = bank->reg_base; 1630 1583 gc->private = bank; 1631 - gc->chip_types[0].regs.mask = GPIO_INTEN; 1584 + gc->chip_types[0].regs.mask = GPIO_INTMASK; 1632 1585 gc->chip_types[0].regs.ack = GPIO_PORTS_EOI; 1633 1586 gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; 1634 - gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; 1635 - gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; 1587 + gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; 1588 + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; 1589 + gc->chip_types[0].chip.irq_enable = rockchip_irq_enable; 1590 + gc->chip_types[0].chip.irq_disable = rockchip_irq_disable; 1636 1591 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; 1592 + gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; 1593 + gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; 1637 1594 gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; 1638 1595 gc->wake_enabled = IRQ_MSK(bank->nr_pins); 1639 1596
+4 -1
drivers/pinctrl/pinctrl-st.c
··· 1012 1012 struct seq_file *s, unsigned pin_id) 1013 1013 { 1014 1014 unsigned long config; 1015 - st_pinconf_get(pctldev, pin_id, &config); 1016 1015 1016 + mutex_unlock(&pctldev->mutex); 1017 + st_pinconf_get(pctldev, pin_id, &config); 1018 + mutex_lock(&pctldev->mutex); 1017 1019 seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n" 1018 1020 "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld," 1019 1021 "de:%ld,rt-clk:%ld,rt-delay:%ld]", ··· 1445 1443 1446 1444 static struct irq_chip st_gpio_irqchip = { 1447 1445 .name = "GPIO", 1446 + .irq_disable = st_gpio_irq_mask, 1448 1447 .irq_mask = st_gpio_irq_mask, 1449 1448 .irq_unmask = st_gpio_irq_unmask, 1450 1449 .irq_set_type = st_gpio_irq_set_type,
+1
drivers/powercap/intel_rapl.c
··· 1041 1041 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ 1042 1042 RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ 1043 1043 RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ 1044 + RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */ 1044 1045 RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */ 1045 1046 {} 1046 1047 };
+12 -7
drivers/regulator/s2mps11.c
··· 570 570 .enable_mask = S2MPS14_ENABLE_MASK \ 571 571 } 572 572 573 - #define regulator_desc_s2mps14_buck(num, min, step) { \ 573 + #define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \ 574 574 .name = "BUCK"#num, \ 575 575 .id = S2MPS14_BUCK##num, \ 576 576 .ops = &s2mps14_reg_ops, \ ··· 579 579 .min_uV = min, \ 580 580 .uV_step = step, \ 581 581 .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ 582 - .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \ 582 + .linear_min_sel = min_sel, \ 583 583 .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \ 584 584 .vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \ 585 585 .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ ··· 613 613 regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV), 614 614 regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV), 615 615 regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV), 616 - regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV), 617 - regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV), 618 - regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV), 619 - regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV), 620 - regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV), 616 + regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV, 617 + S2MPS14_BUCK1235_START_SEL), 618 + regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV, 619 + S2MPS14_BUCK1235_START_SEL), 620 + regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV, 621 + S2MPS14_BUCK1235_START_SEL), 622 + regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV, 623 + S2MPS14_BUCK4_START_SEL), 624 + regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV, 625 + S2MPS14_BUCK1235_START_SEL), 621 626 }; 622 627 623 628 static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
+4
drivers/reset/reset-sunxi.c
··· 102 102 goto err_alloc; 103 103 } 104 104 105 + spin_lock_init(&data->lock); 106 + 105 107 data->rcdev.owner = THIS_MODULE; 106 108 data->rcdev.nr_resets = size * 32; 107 109 data->rcdev.ops = &sunxi_reset_ops; ··· 158 156 data->membase = devm_ioremap_resource(&pdev->dev, res); 159 157 if (IS_ERR(data->membase)) 160 158 return PTR_ERR(data->membase); 159 + 160 + spin_lock_init(&data->lock); 161 161 162 162 data->rcdev.owner = THIS_MODULE; 163 163 data->rcdev.nr_resets = resource_size(res) * 32;
+7 -3
drivers/s390/crypto/ap_bus.c
··· 1163 1163 */ 1164 1164 static inline int ap_test_config_domain(unsigned int domain) 1165 1165 { 1166 - if (!ap_configuration) 1167 - return 1; 1168 - return ap_test_config(ap_configuration->aqm, domain); 1166 + if (!ap_configuration) /* QCI not supported */ 1167 + if (domain < 16) 1168 + return 1; /* then domains 0...15 are configured */ 1169 + else 1170 + return 0; 1171 + else 1172 + return ap_test_config(ap_configuration->aqm, domain); 1169 1173 } 1170 1174 1171 1175 /**
+1 -1
drivers/scsi/fnic/fnic.h
··· 39 39 40 40 #define DRV_NAME "fnic" 41 41 #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 42 - #define DRV_VERSION "1.6.0.16" 42 + #define DRV_VERSION "1.6.0.17" 43 43 #define PFX DRV_NAME ": " 44 44 #define DFX DRV_NAME "%d: " 45 45
+15
drivers/scsi/fnic/fnic_scsi.c
··· 1892 1892 goto fnic_abort_cmd_end; 1893 1893 } 1894 1894 1895 + /* IO out of order */ 1896 + 1897 + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { 1898 + spin_unlock_irqrestore(io_lock, flags); 1899 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1900 + "Issuing Host reset due to out of order IO\n"); 1901 + 1902 + if (fnic_host_reset(sc) == FAILED) { 1903 + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, 1904 + "fnic_host_reset failed.\n"); 1905 + } 1906 + ret = FAILED; 1907 + goto fnic_abort_cmd_end; 1908 + } 1909 + 1895 1910 CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; 1896 1911 1897 1912 /*
+3 -1
drivers/scsi/qla2xxx/qla_os.c
··· 734 734 * Return target busy if we've received a non-zero retry_delay_timer 735 735 * in a FCP_RSP. 736 736 */ 737 - if (time_after(jiffies, fcport->retry_delay_timestamp)) 737 + if (fcport->retry_delay_timestamp == 0) { 738 + /* retry delay not set */ 739 + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) 738 740 fcport->retry_delay_timestamp = 0; 739 741 else 740 742 goto qc24_target_busy;
+2 -2
drivers/scsi/scsi_error.c
··· 1041 1041 } 1042 1042 /* signal not to enter either branch of the if () below */ 1043 1043 timeleft = 0; 1044 - rtn = NEEDS_RETRY; 1044 + rtn = FAILED; 1045 1045 } else { 1046 1046 timeleft = wait_for_completion_timeout(&done, timeout); 1047 1047 rtn = SUCCESS; ··· 1081 1081 rtn = FAILED; 1082 1082 break; 1083 1083 } 1084 - } else if (!rtn) { 1084 + } else if (rtn != FAILED) { 1085 1085 scsi_abort_eh_cmnd(scmd); 1086 1086 rtn = FAILED; 1087 1087 }
+1 -2
drivers/scsi/scsi_lib.c
··· 591 591 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) 592 592 { 593 593 struct scatterlist *first_chunk = NULL; 594 - gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC; 595 594 int ret; 596 595 597 596 BUG_ON(!nents); ··· 605 606 } 606 607 607 608 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, 608 - first_chunk, gfp_mask, scsi_sg_alloc); 609 + first_chunk, GFP_ATOMIC, scsi_sg_alloc); 609 610 if (unlikely(ret)) 610 611 scsi_free_sgtable(sdb, mq); 611 612 return ret;
+3 -2
drivers/scsi/sd.c
··· 2623 2623 sd_config_discard(sdkp, SD_LBP_WS16); 2624 2624 2625 2625 } else { /* LBP VPD page tells us what to use */ 2626 - 2627 - if (sdkp->lbpws) 2626 + if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz) 2627 + sd_config_discard(sdkp, SD_LBP_UNMAP); 2628 + else if (sdkp->lbpws) 2628 2629 sd_config_discard(sdkp, SD_LBP_WS16); 2629 2630 else if (sdkp->lbpws10) 2630 2631 sd_config_discard(sdkp, SD_LBP_WS10);
+4 -4
drivers/spi/spi-img-spfi.c
··· 341 341 default: 342 342 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA; 343 343 rxconf.src_addr_width = 1; 344 - rxconf.src_maxburst = 1; 344 + rxconf.src_maxburst = 4; 345 345 } 346 346 dmaengine_slave_config(spfi->rx_ch, &rxconf); 347 347 ··· 368 368 default: 369 369 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA; 370 370 txconf.dst_addr_width = 1; 371 - txconf.dst_maxburst = 1; 371 + txconf.dst_maxburst = 4; 372 372 break; 373 373 } 374 374 dmaengine_slave_config(spfi->tx_ch, &txconf); ··· 390 390 dma_async_issue_pending(spfi->rx_ch); 391 391 } 392 392 393 + spfi_start(spfi); 394 + 393 395 if (xfer->tx_buf) { 394 396 spfi->tx_dma_busy = true; 395 397 dmaengine_submit(txdesc); 396 398 dma_async_issue_pending(spfi->tx_ch); 397 399 } 398 - 399 - spfi_start(spfi); 400 400 401 401 return 1; 402 402
+5
drivers/spi/spi-sh-msiof.c
··· 480 480 struct device_node *np = spi->master->dev.of_node; 481 481 struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); 482 482 483 + pm_runtime_get_sync(&p->pdev->dev); 484 + 483 485 if (!np) { 484 486 /* 485 487 * Use spi->controller_data for CS (same strategy as spi_gpio), ··· 499 497 500 498 if (spi->cs_gpio >= 0) 501 499 gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 500 + 501 + 502 + pm_runtime_put_sync(&p->pdev->dev); 502 503 503 504 return 0; 504 505 }
+1 -1
drivers/staging/vt6655/baseband.c
··· 2178 2178 /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */ 2179 2179 /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/ 2180 2180 /* Select VC1/VC2, CR215 = 0x02->0x06 */ 2181 - bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06); 2181 + bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06); 2182 2182 /* }} */ 2183 2183 2184 2184 for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
+8
drivers/staging/vt6655/channel.c
··· 182 182 if (pDevice->byCurrentCh == uConnectionChannel) 183 183 return bResult; 184 184 185 + /* Set VGA to max sensitivity */ 186 + if (pDevice->bUpdateBBVGA && 187 + pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) { 188 + pDevice->byBBVGACurrent = pDevice->abyBBVGA[0]; 189 + 190 + BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent); 191 + } 192 + 185 193 /* clear NAV */ 186 194 MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV); 187 195
+10 -3
drivers/staging/vt6655/device_main.c
··· 1232 1232 1233 1233 head_td = priv->apCurrTD[dma_idx]; 1234 1234 1235 - head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP); 1235 + head_td->m_td1TD1.byTCR = 0; 1236 1236 1237 1237 head_td->pTDInfo->skb = skb; 1238 1238 ··· 1256 1256 spin_lock_irqsave(&priv->lock, flags); 1257 1257 1258 1258 priv->bPWBitOn = false; 1259 + 1260 + /* Set TSR1 & ReqCount in TxDescHead */ 1261 + head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU); 1262 + head_td->m_td1TD1.wReqCount = 1263 + cpu_to_le16((u16)head_td->pTDInfo->dwReqCount); 1259 1264 1260 1265 head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; 1261 1266 ··· 1505 1500 if (conf->enable_beacon) { 1506 1501 vnt_beacon_enable(priv, vif, conf); 1507 1502 1508 - MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX); 1503 + MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR, 1504 + TCR_AUTOBCNTX); 1509 1505 } else { 1510 - MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX); 1506 + MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, 1507 + TCR_AUTOBCNTX); 1511 1508 } 1512 1509 } 1513 1510
+1 -4
drivers/staging/vt6655/rxtx.c
··· 1204 1204 1205 1205 ptdCurr = (PSTxDesc)pHeadTD; 1206 1206 1207 - ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding; 1207 + ptdCurr->pTDInfo->dwReqCount = cbReqCount; 1208 1208 ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength; 1209 1209 ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma; 1210 1210 ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma); 1211 - /* Set TSR1 & ReqCount in TxDescHead */ 1212 - ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU); 1213 - ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount)); 1214 1211 1215 1212 return cbHeaderLength; 1216 1213 }
+6 -6
drivers/target/iscsi/iscsi_target.c
··· 2027 2027 goto reject; 2028 2028 } 2029 2029 if (!strncmp("=All", text_ptr, 4)) { 2030 - cmd->cmd_flags |= IFC_SENDTARGETS_ALL; 2030 + cmd->cmd_flags |= ICF_SENDTARGETS_ALL; 2031 2031 } else if (!strncmp("=iqn.", text_ptr, 5) || 2032 2032 !strncmp("=eui.", text_ptr, 5)) { 2033 - cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE; 2033 + cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; 2034 2034 } else { 2035 2035 pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); 2036 2036 goto reject; ··· 3415 3415 return -ENOMEM; 3416 3416 } 3417 3417 /* 3418 - * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE 3418 + * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE 3419 3419 * explicit case.. 3420 3420 */ 3421 - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) { 3421 + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) { 3422 3422 text_ptr = strchr(text_in, '='); 3423 3423 if (!text_ptr) { 3424 3424 pr_err("Unable to locate '=' string in text_in:" ··· 3434 3434 3435 3435 spin_lock(&tiqn_lock); 3436 3436 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 3437 - if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) && 3437 + if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) && 3438 3438 strcmp(tiqn->tiqn, text_ptr)) { 3439 3439 continue; 3440 3440 } ··· 3512 3512 if (end_of_buf) 3513 3513 break; 3514 3514 3515 - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) 3515 + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) 3516 3516 break; 3517 3517 } 3518 3518 spin_unlock(&tiqn_lock);
+2 -2
drivers/target/iscsi/iscsi_target_core.h
··· 135 135 ICF_CONTIG_MEMORY = 0x00000020, 136 136 ICF_ATTACHED_TO_RQUEUE = 0x00000040, 137 137 ICF_OOO_CMDSN = 0x00000080, 138 - IFC_SENDTARGETS_ALL = 0x00000100, 139 - IFC_SENDTARGETS_SINGLE = 0x00000200, 138 + ICF_SENDTARGETS_ALL = 0x00000100, 139 + ICF_SENDTARGETS_SINGLE = 0x00000200, 140 140 }; 141 141 142 142 /* struct iscsi_cmd->i_state */
+4 -50
drivers/target/target_core_device.c
··· 1103 1103 } 1104 1104 EXPORT_SYMBOL(se_dev_set_queue_depth); 1105 1105 1106 - int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1107 - { 1108 - int block_size = dev->dev_attrib.block_size; 1109 - 1110 - if (dev->export_count) { 1111 - pr_err("dev[%p]: Unable to change SE Device" 1112 - " fabric_max_sectors while export_count is %d\n", 1113 - dev, dev->export_count); 1114 - return -EINVAL; 1115 - } 1116 - if (!fabric_max_sectors) { 1117 - pr_err("dev[%p]: Illegal ZERO value for" 1118 - " fabric_max_sectors\n", dev); 1119 - return -EINVAL; 1120 - } 1121 - if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { 1122 - pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" 1123 - " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, 1124 - DA_STATUS_MAX_SECTORS_MIN); 1125 - return -EINVAL; 1126 - } 1127 - if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { 1128 - pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1129 - " greater than DA_STATUS_MAX_SECTORS_MAX:" 1130 - " %u\n", dev, fabric_max_sectors, 1131 - DA_STATUS_MAX_SECTORS_MAX); 1132 - return -EINVAL; 1133 - } 1134 - /* 1135 - * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1136 - */ 1137 - if (!block_size) { 1138 - block_size = 512; 1139 - pr_warn("Defaulting to 512 for zero block_size\n"); 1140 - } 1141 - fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, 1142 - block_size); 1143 - 1144 - dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; 1145 - pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", 1146 - dev, fabric_max_sectors); 1147 - return 0; 1148 - } 1149 - EXPORT_SYMBOL(se_dev_set_fabric_max_sectors); 1150 - 1151 1106 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1152 1107 { 1153 1108 if (dev->export_count) { ··· 1111 1156 dev, dev->export_count); 1112 1157 return -EINVAL; 1113 1158 } 1114 - if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { 1159 + if 
(optimal_sectors > dev->dev_attrib.hw_max_sectors) { 1115 1160 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1116 - " greater than fabric_max_sectors: %u\n", dev, 1117 - optimal_sectors, dev->dev_attrib.fabric_max_sectors); 1161 + " greater than hw_max_sectors: %u\n", dev, 1162 + optimal_sectors, dev->dev_attrib.hw_max_sectors); 1118 1163 return -EINVAL; 1119 1164 } 1120 1165 ··· 1508 1553 dev->dev_attrib.unmap_granularity_alignment = 1509 1554 DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; 1510 1555 dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; 1511 - dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; 1512 - dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; 1513 1556 1514 1557 xcopy_lun = &dev->xcopy_lun; 1515 1558 xcopy_lun->lun_se_dev = dev; ··· 1548 1595 dev->dev_attrib.hw_max_sectors = 1549 1596 se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, 1550 1597 dev->dev_attrib.hw_block_size); 1598 + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; 1551 1599 1552 1600 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1553 1601 dev->creation_time = get_jiffies_64();
+10 -2
drivers/target/target_core_file.c
··· 621 621 struct fd_prot fd_prot; 622 622 sense_reason_t rc; 623 623 int ret = 0; 624 - 624 + /* 625 + * We are currently limited by the number of iovecs (2048) per 626 + * single vfs_[writev,readv] call. 627 + */ 628 + if (cmd->data_length > FD_MAX_BYTES) { 629 + pr_err("FILEIO: Not able to process I/O of %u bytes due to" 630 + "FD_MAX_BYTES: %u iovec count limitiation\n", 631 + cmd->data_length, FD_MAX_BYTES); 632 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 633 + } 625 634 /* 626 635 * Call vectorized fileio functions to map struct scatterlist 627 636 * physical memory addresses to struct iovec virtual memory. ··· 968 959 &fileio_dev_attrib_hw_block_size.attr, 969 960 &fileio_dev_attrib_block_size.attr, 970 961 &fileio_dev_attrib_hw_max_sectors.attr, 971 - &fileio_dev_attrib_fabric_max_sectors.attr, 972 962 &fileio_dev_attrib_optimal_sectors.attr, 973 963 &fileio_dev_attrib_hw_queue_depth.attr, 974 964 &fileio_dev_attrib_queue_depth.attr,
+1 -2
drivers/target/target_core_iblock.c
··· 124 124 q = bdev_get_queue(bd); 125 125 126 126 dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); 127 - dev->dev_attrib.hw_max_sectors = UINT_MAX; 127 + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); 128 128 dev->dev_attrib.hw_queue_depth = q->nr_requests; 129 129 130 130 /* ··· 883 883 &iblock_dev_attrib_hw_block_size.attr, 884 884 &iblock_dev_attrib_block_size.attr, 885 885 &iblock_dev_attrib_hw_max_sectors.attr, 886 - &iblock_dev_attrib_fabric_max_sectors.attr, 887 886 &iblock_dev_attrib_optimal_sectors.attr, 888 887 &iblock_dev_attrib_hw_queue_depth.attr, 889 888 &iblock_dev_attrib_queue_depth.attr,
+12
drivers/target/target_core_pr.c
··· 528 528 529 529 return 0; 530 530 } 531 + } else if (we && registered_nexus) { 532 + /* 533 + * Reads are allowed for Write Exclusive locks 534 + * from all registrants. 535 + */ 536 + if (cmd->data_direction == DMA_FROM_DEVICE) { 537 + pr_debug("Allowing READ CDB: 0x%02x for %s" 538 + " reservation\n", cdb[0], 539 + core_scsi3_pr_dump_type(pr_reg_type)); 540 + 541 + return 0; 542 + } 531 543 } 532 544 pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" 533 545 " for %s reservation\n", transport_dump_cmd_direction(cmd),
-1
drivers/target/target_core_rd.c
··· 657 657 &rd_mcp_dev_attrib_hw_block_size.attr, 658 658 &rd_mcp_dev_attrib_block_size.attr, 659 659 &rd_mcp_dev_attrib_hw_max_sectors.attr, 660 - &rd_mcp_dev_attrib_fabric_max_sectors.attr, 661 660 &rd_mcp_dev_attrib_optimal_sectors.attr, 662 661 &rd_mcp_dev_attrib_hw_queue_depth.attr, 663 662 &rd_mcp_dev_attrib_queue_depth.attr,
-15
drivers/target/target_core_sbc.c
··· 953 953 954 954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 955 955 unsigned long long end_lba; 956 - 957 - if (sectors > dev->dev_attrib.fabric_max_sectors) { 958 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 959 - " big sectors %u exceeds fabric_max_sectors:" 960 - " %u\n", cdb[0], sectors, 961 - dev->dev_attrib.fabric_max_sectors); 962 - return TCM_INVALID_CDB_FIELD; 963 - } 964 - if (sectors > dev->dev_attrib.hw_max_sectors) { 965 - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" 966 - " big sectors %u exceeds backend hw_max_sectors:" 967 - " %u\n", cdb[0], sectors, 968 - dev->dev_attrib.hw_max_sectors); 969 - return TCM_INVALID_CDB_FIELD; 970 - } 971 956 check_lba: 972 957 end_lba = dev->transport->get_blocks(dev) + 1; 973 958 if (cmd->t_task_lba + sectors > end_lba) {
+1 -4
drivers/target/target_core_spc.c
··· 505 505 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) 506 506 { 507 507 struct se_device *dev = cmd->se_dev; 508 - u32 max_sectors; 509 508 int have_tp = 0; 510 509 int opt, min; 511 510 ··· 538 539 /* 539 540 * Set MAXIMUM TRANSFER LENGTH 540 541 */ 541 - max_sectors = min(dev->dev_attrib.fabric_max_sectors, 542 - dev->dev_attrib.hw_max_sectors); 543 - put_unaligned_be32(max_sectors, &buf[8]); 542 + put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); 544 543 545 544 /* 546 545 * Set OPTIMAL TRANSFER LENGTH
-1
drivers/target/target_core_user.c
··· 1118 1118 &tcmu_dev_attrib_hw_block_size.attr, 1119 1119 &tcmu_dev_attrib_block_size.attr, 1120 1120 &tcmu_dev_attrib_hw_max_sectors.attr, 1121 - &tcmu_dev_attrib_fabric_max_sectors.attr, 1122 1121 &tcmu_dev_attrib_optimal_sectors.attr, 1123 1122 &tcmu_dev_attrib_hw_queue_depth.attr, 1124 1123 &tcmu_dev_attrib_queue_depth.attr,
+136 -224
drivers/thermal/cpu_cooling.c
··· 4 4 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) 5 5 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> 6 6 * 7 + * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org> 8 + * 7 9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 10 * This program is free software; you can redistribute it and/or modify 9 11 * it under the terms of the GNU General Public License as published by ··· 30 28 #include <linux/cpu.h> 31 29 #include <linux/cpu_cooling.h> 32 30 31 + /* 32 + * Cooling state <-> CPUFreq frequency 33 + * 34 + * Cooling states are translated to frequencies throughout this driver and this 35 + * is the relation between them. 36 + * 37 + * Highest cooling state corresponds to lowest possible frequency. 38 + * 39 + * i.e. 40 + * level 0 --> 1st Max Freq 41 + * level 1 --> 2nd Max Freq 42 + * ... 43 + */ 44 + 33 45 /** 34 46 * struct cpufreq_cooling_device - data for cooling device with cpufreq 35 47 * @id: unique integer value corresponding to each cpufreq_cooling_device ··· 54 38 * cooling devices. 55 39 * @cpufreq_val: integer value representing the absolute value of the clipped 56 40 * frequency. 41 + * @max_level: maximum cooling level. One less than total number of valid 42 + * cpufreq frequencies. 57 43 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. 44 + * @node: list_head to link all cpufreq_cooling_device together. 58 45 * 59 - * This structure is required for keeping information of each 60 - * cpufreq_cooling_device registered. In order to prevent corruption of this a 61 - * mutex lock cooling_cpufreq_lock is used. 46 + * This structure is required for keeping information of each registered 47 + * cpufreq_cooling_device. 
62 48 */ 63 49 struct cpufreq_cooling_device { 64 50 int id; 65 51 struct thermal_cooling_device *cool_dev; 66 52 unsigned int cpufreq_state; 67 53 unsigned int cpufreq_val; 54 + unsigned int max_level; 55 + unsigned int *freq_table; /* In descending order */ 68 56 struct cpumask allowed_cpus; 69 57 struct list_head node; 70 58 }; 71 59 static DEFINE_IDR(cpufreq_idr); 72 60 static DEFINE_MUTEX(cooling_cpufreq_lock); 73 - 74 - static unsigned int cpufreq_dev_count; 75 61 76 62 static LIST_HEAD(cpufreq_dev_list); 77 63 ··· 116 98 /* Below code defines functions to be used for cpufreq as cooling device */ 117 99 118 100 /** 119 - * is_cpufreq_valid - function to check frequency transitioning capability. 120 - * @cpu: cpu for which check is needed. 101 + * get_level: Find the level for a particular frequency 102 + * @cpufreq_dev: cpufreq_dev for which the property is required 103 + * @freq: Frequency 121 104 * 122 - * This function will check the current state of the system if 123 - * it is capable of changing the frequency for a given @cpu. 124 - * 125 - * Return: 0 if the system is not currently capable of changing 126 - * the frequency of given cpu. !0 in case the frequency is changeable. 105 + * Return: level on success, THERMAL_CSTATE_INVALID on error. 127 106 */ 128 - static int is_cpufreq_valid(int cpu) 107 + static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev, 108 + unsigned int freq) 129 109 { 130 - struct cpufreq_policy policy; 110 + unsigned long level; 131 111 132 - return !cpufreq_get_policy(&policy, cpu); 133 - } 112 + for (level = 0; level <= cpufreq_dev->max_level; level++) { 113 + if (freq == cpufreq_dev->freq_table[level]) 114 + return level; 134 115 135 - enum cpufreq_cooling_property { 136 - GET_LEVEL, 137 - GET_FREQ, 138 - GET_MAXL, 139 - }; 140 - 141 - /** 142 - * get_property - fetch a property of interest for a give cpu. 
143 - * @cpu: cpu for which the property is required 144 - * @input: query parameter 145 - * @output: query return 146 - * @property: type of query (frequency, level, max level) 147 - * 148 - * This is the common function to 149 - * 1. get maximum cpu cooling states 150 - * 2. translate frequency to cooling state 151 - * 3. translate cooling state to frequency 152 - * Note that the code may be not in good shape 153 - * but it is written in this way in order to: 154 - * a) reduce duplicate code as most of the code can be shared. 155 - * b) make sure the logic is consistent when translating between 156 - * cooling states and frequencies. 157 - * 158 - * Return: 0 on success, -EINVAL when invalid parameters are passed. 159 - */ 160 - static int get_property(unsigned int cpu, unsigned long input, 161 - unsigned int *output, 162 - enum cpufreq_cooling_property property) 163 - { 164 - int i; 165 - unsigned long max_level = 0, level = 0; 166 - unsigned int freq = CPUFREQ_ENTRY_INVALID; 167 - int descend = -1; 168 - struct cpufreq_frequency_table *pos, *table = 169 - cpufreq_frequency_get_table(cpu); 170 - 171 - if (!output) 172 - return -EINVAL; 173 - 174 - if (!table) 175 - return -EINVAL; 176 - 177 - cpufreq_for_each_valid_entry(pos, table) { 178 - /* ignore duplicate entry */ 179 - if (freq == pos->frequency) 180 - continue; 181 - 182 - /* get the frequency order */ 183 - if (freq != CPUFREQ_ENTRY_INVALID && descend == -1) 184 - descend = freq > pos->frequency; 185 - 186 - freq = pos->frequency; 187 - max_level++; 116 + if (freq > cpufreq_dev->freq_table[level]) 117 + break; 188 118 } 189 119 190 - /* No valid cpu frequency entry */ 191 - if (max_level == 0) 192 - return -EINVAL; 193 - 194 - /* max_level is an index, not a counter */ 195 - max_level--; 196 - 197 - /* get max level */ 198 - if (property == GET_MAXL) { 199 - *output = (unsigned int)max_level; 200 - return 0; 201 - } 202 - 203 - if (property == GET_FREQ) 204 - level = descend ? 
input : (max_level - input); 205 - 206 - i = 0; 207 - cpufreq_for_each_valid_entry(pos, table) { 208 - /* ignore duplicate entry */ 209 - if (freq == pos->frequency) 210 - continue; 211 - 212 - /* now we have a valid frequency entry */ 213 - freq = pos->frequency; 214 - 215 - if (property == GET_LEVEL && (unsigned int)input == freq) { 216 - /* get level by frequency */ 217 - *output = descend ? i : (max_level - i); 218 - return 0; 219 - } 220 - if (property == GET_FREQ && level == i) { 221 - /* get frequency by level */ 222 - *output = freq; 223 - return 0; 224 - } 225 - i++; 226 - } 227 - 228 - return -EINVAL; 120 + return THERMAL_CSTATE_INVALID; 229 121 } 230 122 231 123 /** 232 - * cpufreq_cooling_get_level - for a give cpu, return the cooling level. 124 + * cpufreq_cooling_get_level - for a given cpu, return the cooling level. 233 125 * @cpu: cpu for which the level is required 234 126 * @freq: the frequency of interest 235 127 * ··· 151 223 */ 152 224 unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) 153 225 { 154 - unsigned int val; 226 + struct cpufreq_cooling_device *cpufreq_dev; 155 227 156 - if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL)) 157 - return THERMAL_CSTATE_INVALID; 228 + mutex_lock(&cooling_cpufreq_lock); 229 + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 230 + if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { 231 + mutex_unlock(&cooling_cpufreq_lock); 232 + return get_level(cpufreq_dev, freq); 233 + } 234 + } 235 + mutex_unlock(&cooling_cpufreq_lock); 158 236 159 - return (unsigned long)val; 237 + pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu); 238 + return THERMAL_CSTATE_INVALID; 160 239 } 161 240 EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level); 162 - 163 - /** 164 - * get_cpu_frequency - get the absolute value of frequency from level. 165 - * @cpu: cpu for which frequency is fetched. 
166 - * @level: cooling level 167 - * 168 - * This function matches cooling level with frequency. Based on a cooling level 169 - * of frequency, equals cooling state of cpu cooling device, it will return 170 - * the corresponding frequency. 171 - * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc 172 - * 173 - * Return: 0 on error, the corresponding frequency otherwise. 174 - */ 175 - static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) 176 - { 177 - int ret = 0; 178 - unsigned int freq; 179 - 180 - ret = get_property(cpu, level, &freq, GET_FREQ); 181 - if (ret) 182 - return 0; 183 - 184 - return freq; 185 - } 186 - 187 - /** 188 - * cpufreq_apply_cooling - function to apply frequency clipping. 189 - * @cpufreq_device: cpufreq_cooling_device pointer containing frequency 190 - * clipping data. 191 - * @cooling_state: value of the cooling state. 192 - * 193 - * Function used to make sure the cpufreq layer is aware of current thermal 194 - * limits. The limits are applied by updating the cpufreq policy. 195 - * 196 - * Return: 0 on success, an error code otherwise (-EINVAL in case wrong 197 - * cooling state). 
198 - */ 199 - static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, 200 - unsigned long cooling_state) 201 - { 202 - unsigned int cpuid, clip_freq; 203 - struct cpumask *mask = &cpufreq_device->allowed_cpus; 204 - unsigned int cpu = cpumask_any(mask); 205 - 206 - 207 - /* Check if the old cooling action is same as new cooling action */ 208 - if (cpufreq_device->cpufreq_state == cooling_state) 209 - return 0; 210 - 211 - clip_freq = get_cpu_frequency(cpu, cooling_state); 212 - if (!clip_freq) 213 - return -EINVAL; 214 - 215 - cpufreq_device->cpufreq_state = cooling_state; 216 - cpufreq_device->cpufreq_val = clip_freq; 217 - 218 - for_each_cpu(cpuid, mask) { 219 - if (is_cpufreq_valid(cpuid)) 220 - cpufreq_update_policy(cpuid); 221 - } 222 - 223 - return 0; 224 - } 225 241 226 242 /** 227 243 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change. ··· 195 323 &cpufreq_dev->allowed_cpus)) 196 324 continue; 197 325 198 - if (!cpufreq_dev->cpufreq_val) 199 - cpufreq_dev->cpufreq_val = get_cpu_frequency( 200 - cpumask_any(&cpufreq_dev->allowed_cpus), 201 - cpufreq_dev->cpufreq_state); 202 - 203 326 max_freq = cpufreq_dev->cpufreq_val; 204 327 205 328 if (policy->max != max_freq) ··· 221 354 unsigned long *state) 222 355 { 223 356 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; 224 - struct cpumask *mask = &cpufreq_device->allowed_cpus; 225 - unsigned int cpu; 226 - unsigned int count = 0; 227 - int ret; 228 357 229 - cpu = cpumask_any(mask); 230 - 231 - ret = get_property(cpu, 0, &count, GET_MAXL); 232 - 233 - if (count > 0) 234 - *state = count; 235 - 236 - return ret; 358 + *state = cpufreq_device->max_level; 359 + return 0; 237 360 } 238 361 239 362 /** ··· 260 403 unsigned long state) 261 404 { 262 405 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; 406 + unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus); 407 + unsigned int clip_freq; 263 408 264 - return 
cpufreq_apply_cooling(cpufreq_device, state); 409 + /* Request state should be less than max_level */ 410 + if (WARN_ON(state > cpufreq_device->max_level)) 411 + return -EINVAL; 412 + 413 + /* Check if the old cooling action is same as new cooling action */ 414 + if (cpufreq_device->cpufreq_state == state) 415 + return 0; 416 + 417 + clip_freq = cpufreq_device->freq_table[state]; 418 + cpufreq_device->cpufreq_state = state; 419 + cpufreq_device->cpufreq_val = clip_freq; 420 + 421 + cpufreq_update_policy(cpu); 422 + 423 + return 0; 265 424 } 266 425 267 426 /* Bind cpufreq callbacks to thermal cooling device ops */ ··· 292 419 .notifier_call = cpufreq_thermal_notifier, 293 420 }; 294 421 422 + static unsigned int find_next_max(struct cpufreq_frequency_table *table, 423 + unsigned int prev_max) 424 + { 425 + struct cpufreq_frequency_table *pos; 426 + unsigned int max = 0; 427 + 428 + cpufreq_for_each_valid_entry(pos, table) { 429 + if (pos->frequency > max && pos->frequency < prev_max) 430 + max = pos->frequency; 431 + } 432 + 433 + return max; 434 + } 435 + 295 436 /** 296 437 * __cpufreq_cooling_register - helper function to create cpufreq cooling device 297 438 * @np: a valid struct device_node to the cooling device device tree node 298 439 * @clip_cpus: cpumask of cpus where the frequency constraints will happen. 440 + * Normally this should be same as cpufreq policy->related_cpus. 299 441 * 300 442 * This interface function registers the cpufreq cooling device with the name 301 443 * "thermal-cpufreq-%x". 
This api can support multiple instances of cpufreq ··· 325 437 const struct cpumask *clip_cpus) 326 438 { 327 439 struct thermal_cooling_device *cool_dev; 328 - struct cpufreq_cooling_device *cpufreq_dev = NULL; 329 - unsigned int min = 0, max = 0; 440 + struct cpufreq_cooling_device *cpufreq_dev; 330 441 char dev_name[THERMAL_NAME_LENGTH]; 331 - int ret = 0, i; 332 - struct cpufreq_policy policy; 442 + struct cpufreq_frequency_table *pos, *table; 443 + unsigned int freq, i; 444 + int ret; 333 445 334 - /* Verify that all the clip cpus have same freq_min, freq_max limit */ 335 - for_each_cpu(i, clip_cpus) { 336 - /* continue if cpufreq policy not found and not return error */ 337 - if (!cpufreq_get_policy(&policy, i)) 338 - continue; 339 - if (min == 0 && max == 0) { 340 - min = policy.cpuinfo.min_freq; 341 - max = policy.cpuinfo.max_freq; 342 - } else { 343 - if (min != policy.cpuinfo.min_freq || 344 - max != policy.cpuinfo.max_freq) 345 - return ERR_PTR(-EINVAL); 346 - } 446 + table = cpufreq_frequency_get_table(cpumask_first(clip_cpus)); 447 + if (!table) { 448 + pr_debug("%s: CPUFreq table not found\n", __func__); 449 + return ERR_PTR(-EPROBE_DEFER); 347 450 } 348 - cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device), 349 - GFP_KERNEL); 451 + 452 + cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL); 350 453 if (!cpufreq_dev) 351 454 return ERR_PTR(-ENOMEM); 455 + 456 + /* Find max levels */ 457 + cpufreq_for_each_valid_entry(pos, table) 458 + cpufreq_dev->max_level++; 459 + 460 + cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) * 461 + cpufreq_dev->max_level, GFP_KERNEL); 462 + if (!cpufreq_dev->freq_table) { 463 + cool_dev = ERR_PTR(-ENOMEM); 464 + goto free_cdev; 465 + } 466 + 467 + /* max_level is an index, not a counter */ 468 + cpufreq_dev->max_level--; 352 469 353 470 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); 354 471 355 472 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); 356 473 if (ret) { 357 - 
kfree(cpufreq_dev); 358 - return ERR_PTR(-EINVAL); 474 + cool_dev = ERR_PTR(ret); 475 + goto free_table; 359 476 } 360 477 361 478 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", ··· 368 475 369 476 cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, 370 477 &cpufreq_cooling_ops); 371 - if (IS_ERR(cool_dev)) { 372 - release_idr(&cpufreq_idr, cpufreq_dev->id); 373 - kfree(cpufreq_dev); 374 - return cool_dev; 478 + if (IS_ERR(cool_dev)) 479 + goto remove_idr; 480 + 481 + /* Fill freq-table in descending order of frequencies */ 482 + for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { 483 + freq = find_next_max(table, freq); 484 + cpufreq_dev->freq_table[i] = freq; 485 + 486 + /* Warn for duplicate entries */ 487 + if (!freq) 488 + pr_warn("%s: table has duplicate entries\n", __func__); 489 + else 490 + pr_debug("%s: freq:%u KHz\n", __func__, freq); 375 491 } 492 + 493 + cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0]; 376 494 cpufreq_dev->cool_dev = cool_dev; 377 - cpufreq_dev->cpufreq_state = 0; 495 + 378 496 mutex_lock(&cooling_cpufreq_lock); 379 497 380 498 /* Register the notifier for first cpufreq cooling device */ 381 - if (cpufreq_dev_count == 0) 499 + if (list_empty(&cpufreq_dev_list)) 382 500 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 383 501 CPUFREQ_POLICY_NOTIFIER); 384 - cpufreq_dev_count++; 385 502 list_add(&cpufreq_dev->node, &cpufreq_dev_list); 386 503 387 504 mutex_unlock(&cooling_cpufreq_lock); 505 + 506 + return cool_dev; 507 + 508 + remove_idr: 509 + release_idr(&cpufreq_idr, cpufreq_dev->id); 510 + free_table: 511 + kfree(cpufreq_dev->freq_table); 512 + free_cdev: 513 + kfree(cpufreq_dev); 388 514 389 515 return cool_dev; 390 516 } ··· 466 554 cpufreq_dev = cdev->devdata; 467 555 mutex_lock(&cooling_cpufreq_lock); 468 556 list_del(&cpufreq_dev->node); 469 - cpufreq_dev_count--; 470 557 471 558 /* Unregister the notifier for the last cpufreq cooling device */ 472 - if (cpufreq_dev_count 
== 0) 559 + if (list_empty(&cpufreq_dev_list)) 473 560 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 474 561 CPUFREQ_POLICY_NOTIFIER); 475 562 mutex_unlock(&cooling_cpufreq_lock); 476 563 477 564 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 478 565 release_idr(&cpufreq_idr, cpufreq_dev->id); 566 + kfree(cpufreq_dev->freq_table); 479 567 kfree(cpufreq_dev); 480 568 } 481 569 EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
+9 -11
drivers/thermal/db8500_cpufreq_cooling.c
··· 18 18 */ 19 19 20 20 #include <linux/cpu_cooling.h> 21 - #include <linux/cpufreq.h> 22 21 #include <linux/err.h> 23 22 #include <linux/module.h> 24 23 #include <linux/of.h> ··· 27 28 static int db8500_cpufreq_cooling_probe(struct platform_device *pdev) 28 29 { 29 30 struct thermal_cooling_device *cdev; 30 - struct cpumask mask_val; 31 31 32 - /* make sure cpufreq driver has been initialized */ 33 - if (!cpufreq_frequency_get_table(0)) 34 - return -EPROBE_DEFER; 35 - 36 - cpumask_set_cpu(0, &mask_val); 37 - cdev = cpufreq_cooling_register(&mask_val); 38 - 32 + cdev = cpufreq_cooling_register(cpu_present_mask); 39 33 if (IS_ERR(cdev)) { 40 - dev_err(&pdev->dev, "Failed to register cooling device\n"); 41 - return PTR_ERR(cdev); 34 + int ret = PTR_ERR(cdev); 35 + 36 + if (ret != -EPROBE_DEFER) 37 + dev_err(&pdev->dev, 38 + "Failed to register cooling device %d\n", 39 + ret); 40 + 41 + return ret; 42 42 } 43 43 44 44 platform_set_drvdata(pdev, cdev);
+7 -10
drivers/thermal/imx_thermal.c
··· 9 9 10 10 #include <linux/clk.h> 11 11 #include <linux/cpu_cooling.h> 12 - #include <linux/cpufreq.h> 13 12 #include <linux/delay.h> 14 13 #include <linux/device.h> 15 14 #include <linux/init.h> ··· 453 454 const struct of_device_id *of_id = 454 455 of_match_device(of_imx_thermal_match, &pdev->dev); 455 456 struct imx_thermal_data *data; 456 - struct cpumask clip_cpus; 457 457 struct regmap *map; 458 458 int measure_freq; 459 459 int ret; 460 460 461 - if (!cpufreq_get_current_driver()) { 462 - dev_dbg(&pdev->dev, "no cpufreq driver!"); 463 - return -EPROBE_DEFER; 464 - } 465 461 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 466 462 if (!data) 467 463 return -ENOMEM; ··· 510 516 regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF); 511 517 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); 512 518 513 - cpumask_set_cpu(0, &clip_cpus); 514 - data->cdev = cpufreq_cooling_register(&clip_cpus); 519 + data->cdev = cpufreq_cooling_register(cpu_present_mask); 515 520 if (IS_ERR(data->cdev)) { 516 521 ret = PTR_ERR(data->cdev); 517 - dev_err(&pdev->dev, 518 - "failed to register cpufreq cooling device: %d\n", ret); 522 + if (ret != -EPROBE_DEFER) 523 + dev_err(&pdev->dev, 524 + "failed to register cpufreq cooling device: %d\n", 525 + ret); 519 526 return ret; 520 527 } 521 528 ··· 608 613 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP); 609 614 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); 610 615 data->mode = THERMAL_DEVICE_DISABLED; 616 + clk_disable_unprepare(data->thermal_clk); 611 617 612 618 return 0; 613 619 } ··· 618 622 struct imx_thermal_data *data = dev_get_drvdata(dev); 619 623 struct regmap *map = data->tempmon; 620 624 625 + clk_prepare_enable(data->thermal_clk); 621 626 /* Enabled thermal sensor after resume */ 622 627 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); 623 628 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+1
drivers/thermal/int340x_thermal/Makefile
··· 1 1 obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o 2 2 obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o 3 3 obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o 4 + obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o 4 5 obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
+8 -16
drivers/thermal/int340x_thermal/acpi_thermal_rel.c
··· 82 82 struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" }; 83 83 84 84 if (!acpi_has_method(handle, "_TRT")) 85 - return 0; 85 + return -ENODEV; 86 86 87 87 status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer); 88 88 if (ACPI_FAILURE(status)) ··· 119 119 continue; 120 120 121 121 result = acpi_bus_get_device(trt->source, &adev); 122 - if (!result) 123 - acpi_create_platform_device(adev); 124 - else 122 + if (result) 125 123 pr_warn("Failed to get source ACPI device\n"); 126 124 127 125 result = acpi_bus_get_device(trt->target, &adev); 128 - if (!result) 129 - acpi_create_platform_device(adev); 130 - else 126 + if (result) 131 127 pr_warn("Failed to get target ACPI device\n"); 132 128 } 133 129 ··· 163 167 sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" }; 164 168 165 169 if (!acpi_has_method(handle, "_ART")) 166 - return 0; 170 + return -ENODEV; 167 171 168 172 status = acpi_evaluate_object(handle, "_ART", NULL, &buffer); 169 173 if (ACPI_FAILURE(status)) ··· 202 206 203 207 if (art->source) { 204 208 result = acpi_bus_get_device(art->source, &adev); 205 - if (!result) 206 - acpi_create_platform_device(adev); 207 - else 209 + if (result) 208 210 pr_warn("Failed to get source ACPI device\n"); 209 211 } 210 212 if (art->target) { 211 213 result = acpi_bus_get_device(art->target, &adev); 212 - if (!result) 213 - acpi_create_platform_device(adev); 214 - else 214 + if (result) 215 215 pr_warn("Failed to get source ACPI device\n"); 216 216 } 217 217 } ··· 313 321 unsigned long length = 0; 314 322 int count = 0; 315 323 char __user *arg = (void __user *)__arg; 316 - struct trt *trts; 317 - struct art *arts; 324 + struct trt *trts = NULL; 325 + struct art *arts = NULL; 318 326 319 327 switch (cmd) { 320 328 case ACPI_THERMAL_GET_TRT_COUNT:
-1
drivers/thermal/int340x_thermal/int3400_thermal.c
··· 335 335 .remove = int3400_thermal_remove, 336 336 .driver = { 337 337 .name = "int3400 thermal", 338 - .owner = THIS_MODULE, 339 338 .acpi_match_table = ACPI_PTR(int3400_thermal_match), 340 339 }, 341 340 };
-1
drivers/thermal/int340x_thermal/int3402_thermal.c
··· 231 231 .remove = int3402_thermal_remove, 232 232 .driver = { 233 233 .name = "int3402 thermal", 234 - .owner = THIS_MODULE, 235 234 .acpi_match_table = int3402_thermal_match, 236 235 }, 237 236 };
+4
drivers/thermal/int340x_thermal/int3403_thermal.c
··· 301 301 { 302 302 struct int3403_sensor *obj = priv->priv; 303 303 304 + acpi_remove_notify_handler(priv->adev->handle, 305 + ACPI_DEVICE_NOTIFY, int3403_notify); 304 306 thermal_zone_device_unregister(obj->tzone); 305 307 return 0; 306 308 } ··· 371 369 p = buf.pointer; 372 370 if (!p || (p->type != ACPI_TYPE_PACKAGE)) { 373 371 printk(KERN_WARNING "Invalid PPSS data\n"); 372 + kfree(buf.pointer); 374 373 return -EFAULT; 375 374 } 376 375 ··· 384 381 385 382 priv->priv = obj; 386 383 384 + kfree(buf.pointer); 387 385 /* TODO: add ACPI notification support */ 388 386 389 387 return result;
+311
drivers/thermal/int340x_thermal/processor_thermal_device.c
··· 1 + /* 2 + * processor_thermal_device.c 3 + * Copyright (c) 2014, Intel Corporation. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + */ 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/init.h> 18 + #include <linux/pci.h> 19 + #include <linux/platform_device.h> 20 + #include <linux/acpi.h> 21 + 22 + /* Broadwell-U/HSB thermal reporting device */ 23 + #define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603 24 + #define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03 25 + 26 + /* Braswell thermal reporting device */ 27 + #define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC 28 + 29 + struct power_config { 30 + u32 index; 31 + u32 min_uw; 32 + u32 max_uw; 33 + u32 tmin_us; 34 + u32 tmax_us; 35 + u32 step_uw; 36 + }; 37 + 38 + struct proc_thermal_device { 39 + struct device *dev; 40 + struct acpi_device *adev; 41 + struct power_config power_limits[2]; 42 + }; 43 + 44 + enum proc_thermal_emum_mode_type { 45 + PROC_THERMAL_NONE, 46 + PROC_THERMAL_PCI, 47 + PROC_THERMAL_PLATFORM_DEV 48 + }; 49 + 50 + /* 51 + * We can have only one type of enumeration, PCI or Platform, 52 + * not both. So we don't need instance specific data. 
53 + */ 54 + static enum proc_thermal_emum_mode_type proc_thermal_emum_mode = 55 + PROC_THERMAL_NONE; 56 + 57 + #define POWER_LIMIT_SHOW(index, suffix) \ 58 + static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \ 59 + struct device_attribute *attr, \ 60 + char *buf) \ 61 + { \ 62 + struct pci_dev *pci_dev; \ 63 + struct platform_device *pdev; \ 64 + struct proc_thermal_device *proc_dev; \ 65 + \ 66 + if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ 67 + pdev = to_platform_device(dev); \ 68 + proc_dev = platform_get_drvdata(pdev); \ 69 + } else { \ 70 + pci_dev = to_pci_dev(dev); \ 71 + proc_dev = pci_get_drvdata(pci_dev); \ 72 + } \ 73 + return sprintf(buf, "%lu\n",\ 74 + (unsigned long)proc_dev->power_limits[index].suffix * 1000); \ 75 + } 76 + 77 + POWER_LIMIT_SHOW(0, min_uw) 78 + POWER_LIMIT_SHOW(0, max_uw) 79 + POWER_LIMIT_SHOW(0, step_uw) 80 + POWER_LIMIT_SHOW(0, tmin_us) 81 + POWER_LIMIT_SHOW(0, tmax_us) 82 + 83 + POWER_LIMIT_SHOW(1, min_uw) 84 + POWER_LIMIT_SHOW(1, max_uw) 85 + POWER_LIMIT_SHOW(1, step_uw) 86 + POWER_LIMIT_SHOW(1, tmin_us) 87 + POWER_LIMIT_SHOW(1, tmax_us) 88 + 89 + static DEVICE_ATTR_RO(power_limit_0_min_uw); 90 + static DEVICE_ATTR_RO(power_limit_0_max_uw); 91 + static DEVICE_ATTR_RO(power_limit_0_step_uw); 92 + static DEVICE_ATTR_RO(power_limit_0_tmin_us); 93 + static DEVICE_ATTR_RO(power_limit_0_tmax_us); 94 + 95 + static DEVICE_ATTR_RO(power_limit_1_min_uw); 96 + static DEVICE_ATTR_RO(power_limit_1_max_uw); 97 + static DEVICE_ATTR_RO(power_limit_1_step_uw); 98 + static DEVICE_ATTR_RO(power_limit_1_tmin_us); 99 + static DEVICE_ATTR_RO(power_limit_1_tmax_us); 100 + 101 + static struct attribute *power_limit_attrs[] = { 102 + &dev_attr_power_limit_0_min_uw.attr, 103 + &dev_attr_power_limit_1_min_uw.attr, 104 + &dev_attr_power_limit_0_max_uw.attr, 105 + &dev_attr_power_limit_1_max_uw.attr, 106 + &dev_attr_power_limit_0_step_uw.attr, 107 + &dev_attr_power_limit_1_step_uw.attr, 108 + 
&dev_attr_power_limit_0_tmin_us.attr, 109 + &dev_attr_power_limit_1_tmin_us.attr, 110 + &dev_attr_power_limit_0_tmax_us.attr, 111 + &dev_attr_power_limit_1_tmax_us.attr, 112 + NULL 113 + }; 114 + 115 + static struct attribute_group power_limit_attribute_group = { 116 + .attrs = power_limit_attrs, 117 + .name = "power_limits" 118 + }; 119 + 120 + static int proc_thermal_add(struct device *dev, 121 + struct proc_thermal_device **priv) 122 + { 123 + struct proc_thermal_device *proc_priv; 124 + struct acpi_device *adev; 125 + acpi_status status; 126 + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 127 + union acpi_object *elements, *ppcc; 128 + union acpi_object *p; 129 + int i; 130 + int ret; 131 + 132 + adev = ACPI_COMPANION(dev); 133 + if (!adev) 134 + return -ENODEV; 135 + 136 + status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); 137 + if (ACPI_FAILURE(status)) 138 + return -ENODEV; 139 + 140 + p = buf.pointer; 141 + if (!p || (p->type != ACPI_TYPE_PACKAGE)) { 142 + dev_err(dev, "Invalid PPCC data\n"); 143 + ret = -EFAULT; 144 + goto free_buffer; 145 + } 146 + if (!p->package.count) { 147 + dev_err(dev, "Invalid PPCC package size\n"); 148 + ret = -EFAULT; 149 + goto free_buffer; 150 + } 151 + 152 + proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL); 153 + if (!proc_priv) { 154 + ret = -ENOMEM; 155 + goto free_buffer; 156 + } 157 + 158 + proc_priv->dev = dev; 159 + proc_priv->adev = adev; 160 + 161 + for (i = 0; i < min((int)p->package.count - 1, 2); ++i) { 162 + elements = &(p->package.elements[i+1]); 163 + if (elements->type != ACPI_TYPE_PACKAGE || 164 + elements->package.count != 6) { 165 + ret = -EFAULT; 166 + goto free_buffer; 167 + } 168 + ppcc = elements->package.elements; 169 + proc_priv->power_limits[i].index = ppcc[0].integer.value; 170 + proc_priv->power_limits[i].min_uw = ppcc[1].integer.value; 171 + proc_priv->power_limits[i].max_uw = ppcc[2].integer.value; 172 + proc_priv->power_limits[i].tmin_us = 
ppcc[3].integer.value; 173 + proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value; 174 + proc_priv->power_limits[i].step_uw = ppcc[5].integer.value; 175 + } 176 + 177 + *priv = proc_priv; 178 + 179 + ret = sysfs_create_group(&dev->kobj, 180 + &power_limit_attribute_group); 181 + 182 + free_buffer: 183 + kfree(buf.pointer); 184 + 185 + return ret; 186 + } 187 + 188 + void proc_thermal_remove(struct proc_thermal_device *proc_priv) 189 + { 190 + sysfs_remove_group(&proc_priv->dev->kobj, 191 + &power_limit_attribute_group); 192 + } 193 + 194 + static int int3401_add(struct platform_device *pdev) 195 + { 196 + struct proc_thermal_device *proc_priv; 197 + int ret; 198 + 199 + if (proc_thermal_emum_mode == PROC_THERMAL_PCI) { 200 + dev_err(&pdev->dev, "error: enumerated as PCI dev\n"); 201 + return -ENODEV; 202 + } 203 + 204 + ret = proc_thermal_add(&pdev->dev, &proc_priv); 205 + if (ret) 206 + return ret; 207 + 208 + platform_set_drvdata(pdev, proc_priv); 209 + proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; 210 + 211 + return 0; 212 + } 213 + 214 + static int int3401_remove(struct platform_device *pdev) 215 + { 216 + proc_thermal_remove(platform_get_drvdata(pdev)); 217 + 218 + return 0; 219 + } 220 + 221 + static int proc_thermal_pci_probe(struct pci_dev *pdev, 222 + const struct pci_device_id *unused) 223 + { 224 + struct proc_thermal_device *proc_priv; 225 + int ret; 226 + 227 + if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { 228 + dev_err(&pdev->dev, "error: enumerated as platform dev\n"); 229 + return -ENODEV; 230 + } 231 + 232 + ret = pci_enable_device(pdev); 233 + if (ret < 0) { 234 + dev_err(&pdev->dev, "error: could not enable device\n"); 235 + return ret; 236 + } 237 + 238 + ret = proc_thermal_add(&pdev->dev, &proc_priv); 239 + if (ret) { 240 + pci_disable_device(pdev); 241 + return ret; 242 + } 243 + 244 + pci_set_drvdata(pdev, proc_priv); 245 + proc_thermal_emum_mode = PROC_THERMAL_PCI; 246 + 247 + return 0; 248 + } 249 + 250 + static 
void proc_thermal_pci_remove(struct pci_dev *pdev) 251 + { 252 + proc_thermal_remove(pci_get_drvdata(pdev)); 253 + pci_disable_device(pdev); 254 + } 255 + 256 + static const struct pci_device_id proc_thermal_pci_ids[] = { 257 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)}, 258 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)}, 259 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)}, 260 + { 0, }, 261 + }; 262 + 263 + MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids); 264 + 265 + static struct pci_driver proc_thermal_pci_driver = { 266 + .name = "proc_thermal", 267 + .probe = proc_thermal_pci_probe, 268 + .remove = proc_thermal_pci_remove, 269 + .id_table = proc_thermal_pci_ids, 270 + }; 271 + 272 + static const struct acpi_device_id int3401_device_ids[] = { 273 + {"INT3401", 0}, 274 + {"", 0}, 275 + }; 276 + MODULE_DEVICE_TABLE(acpi, int3401_device_ids); 277 + 278 + static struct platform_driver int3401_driver = { 279 + .probe = int3401_add, 280 + .remove = int3401_remove, 281 + .driver = { 282 + .name = "int3401 thermal", 283 + .acpi_match_table = int3401_device_ids, 284 + }, 285 + }; 286 + 287 + static int __init proc_thermal_init(void) 288 + { 289 + int ret; 290 + 291 + ret = platform_driver_register(&int3401_driver); 292 + if (ret) 293 + return ret; 294 + 295 + ret = pci_register_driver(&proc_thermal_pci_driver); 296 + 297 + return ret; 298 + } 299 + 300 + static void __exit proc_thermal_exit(void) 301 + { 302 + platform_driver_unregister(&int3401_driver); 303 + pci_unregister_driver(&proc_thermal_pci_driver); 304 + } 305 + 306 + module_init(proc_thermal_init); 307 + module_exit(proc_thermal_exit); 308 + 309 + MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); 310 + MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver"); 311 + MODULE_LICENSE("GPL v2");
+1
drivers/thermal/intel_powerclamp.c
··· 688 688 { X86_VENDOR_INTEL, 6, 0x45}, 689 689 { X86_VENDOR_INTEL, 6, 0x46}, 690 690 { X86_VENDOR_INTEL, 6, 0x4c}, 691 + { X86_VENDOR_INTEL, 6, 0x56}, 691 692 {} 692 693 }; 693 694 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+1 -1
drivers/thermal/of-thermal.c
··· 149 149 * 150 150 * Return: pointer to trip points table, NULL otherwise 151 151 */ 152 - const struct thermal_trip * const 152 + const struct thermal_trip * 153 153 of_thermal_get_trip_points(struct thermal_zone_device *tz) 154 154 { 155 155 struct __thermal_zone *data = tz->devdata;
+11 -6
drivers/thermal/rcar_thermal.c
··· 63 63 struct mutex lock; 64 64 struct list_head list; 65 65 int id; 66 - int ctemp; 66 + u32 ctemp; 67 67 }; 68 68 69 69 #define rcar_thermal_for_each_priv(pos, common) \ ··· 145 145 { 146 146 struct device *dev = rcar_priv_to_dev(priv); 147 147 int i; 148 - int ctemp, old, new; 148 + u32 ctemp, old, new; 149 149 int ret = -EINVAL; 150 150 151 151 mutex_lock(&priv->lock); ··· 372 372 int i; 373 373 int ret = -ENODEV; 374 374 int idle = IDLE_INTERVAL; 375 + u32 enr_bits = 0; 375 376 376 377 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); 377 378 if (!common) ··· 391 390 392 391 /* 393 392 * platform has IRQ support. 394 - * Then, drier use common register 393 + * Then, driver uses common registers 395 394 */ 396 395 397 396 ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, ··· 408 407 common->base = devm_ioremap_resource(dev, res); 409 408 if (IS_ERR(common->base)) 410 409 return PTR_ERR(common->base); 411 - 412 - /* enable temperature comparation */ 413 - rcar_thermal_common_write(common, ENR, 0x00030303); 414 410 415 411 idle = 0; /* polling delay is not needed */ 416 412 } ··· 450 452 rcar_thermal_irq_enable(priv); 451 453 452 454 list_move_tail(&priv->list, &common->head); 455 + 456 + /* update ENR bits */ 457 + enr_bits |= 3 << (i * 8); 453 458 } 459 + 460 + /* enable temperature comparation */ 461 + if (irq) 462 + rcar_thermal_common_write(common, ENR, enr_bits); 454 463 455 464 platform_set_drvdata(pdev, common); 456 465
-1
drivers/thermal/rockchip_thermal.c
··· 677 677 static struct platform_driver rockchip_thermal_driver = { 678 678 .driver = { 679 679 .name = "rockchip-thermal", 680 - .owner = THIS_MODULE, 681 680 .pm = &rockchip_thermal_pm_ops, 682 681 .of_match_table = of_rockchip_thermal_match, 683 682 },
+1 -1
drivers/thermal/samsung/Kconfig
··· 1 1 config EXYNOS_THERMAL 2 2 tristate "Exynos thermal management unit driver" 3 - depends on ARCH_HAS_BANDGAP && OF 3 + depends on OF 4 4 help 5 5 If you say yes here you get support for the TMU (Thermal Management 6 6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
+6 -6
drivers/thermal/samsung/exynos_thermal_common.c
··· 347 347 int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) 348 348 { 349 349 int ret; 350 - struct cpumask mask_val; 351 350 struct exynos_thermal_zone *th_zone; 352 351 353 352 if (!sensor_conf || !sensor_conf->read_temperature) { ··· 366 367 * sensor 367 368 */ 368 369 if (sensor_conf->cooling_data.freq_clip_count > 0) { 369 - cpumask_set_cpu(0, &mask_val); 370 370 th_zone->cool_dev[th_zone->cool_dev_size] = 371 - cpufreq_cooling_register(&mask_val); 371 + cpufreq_cooling_register(cpu_present_mask); 372 372 if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) { 373 - dev_err(sensor_conf->dev, 374 - "Failed to register cpufreq cooling device\n"); 375 - ret = -EINVAL; 373 + ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]); 374 + if (ret != -EPROBE_DEFER) 375 + dev_err(sensor_conf->dev, 376 + "Failed to register cpufreq cooling device: %d\n", 377 + ret); 376 378 goto err_unregister; 377 379 } 378 380 th_zone->cool_dev_size++;
+4 -1
drivers/thermal/samsung/exynos_tmu.c
··· 927 927 /* Register the sensor with thermal management interface */ 928 928 ret = exynos_register_thermal(sensor_conf); 929 929 if (ret) { 930 - dev_err(&pdev->dev, "Failed to register thermal interface\n"); 930 + if (ret != -EPROBE_DEFER) 931 + dev_err(&pdev->dev, 932 + "Failed to register thermal interface: %d\n", 933 + ret); 931 934 goto err_clk; 932 935 } 933 936 data->reg_conf = sensor_conf;
+4 -2
drivers/thermal/thermal_core.c
··· 930 930 struct thermal_zone_device *pos1; 931 931 struct thermal_cooling_device *pos2; 932 932 unsigned long max_state; 933 - int result; 933 + int result, ret; 934 934 935 935 if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) 936 936 return -EINVAL; ··· 947 947 if (tz != pos1 || cdev != pos2) 948 948 return -EINVAL; 949 949 950 - cdev->ops->get_max_state(cdev, &max_state); 950 + ret = cdev->ops->get_max_state(cdev, &max_state); 951 + if (ret) 952 + return ret; 951 953 952 954 /* lower default 0, upper default max_state */ 953 955 lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
+2 -2
drivers/thermal/thermal_core.h
··· 91 91 void of_thermal_destroy_zones(void); 92 92 int of_thermal_get_ntrips(struct thermal_zone_device *); 93 93 bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); 94 - const struct thermal_trip * const 94 + const struct thermal_trip * 95 95 of_thermal_get_trip_points(struct thermal_zone_device *); 96 96 #else 97 97 static inline int of_parse_thermal_zones(void) { return 0; } ··· 105 105 { 106 106 return 0; 107 107 } 108 - static inline const struct thermal_trip * const 108 + static inline const struct thermal_trip * 109 109 of_thermal_get_trip_points(struct thermal_zone_device *tz) 110 110 { 111 111 return NULL;
+8 -9
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
··· 28 28 #include <linux/kernel.h> 29 29 #include <linux/workqueue.h> 30 30 #include <linux/thermal.h> 31 - #include <linux/cpufreq.h> 32 31 #include <linux/cpumask.h> 33 32 #include <linux/cpu_cooling.h> 34 33 #include <linux/of.h> ··· 406 407 if (!data) 407 408 return -EINVAL; 408 409 409 - if (!cpufreq_get_current_driver()) { 410 - dev_dbg(bgp->dev, "no cpufreq driver yet\n"); 411 - return -EPROBE_DEFER; 412 - } 413 - 414 410 /* Register cooling device */ 415 411 data->cool_dev = cpufreq_cooling_register(cpu_present_mask); 416 412 if (IS_ERR(data->cool_dev)) { 417 - dev_err(bgp->dev, 418 - "Failed to register cpufreq cooling device\n"); 419 - return PTR_ERR(data->cool_dev); 413 + int ret = PTR_ERR(data->cool_dev); 414 + 415 + if (ret != -EPROBE_DEFER) 416 + dev_err(bgp->dev, 417 + "Failed to register cpu cooling device %d\n", 418 + ret); 419 + 420 + return ret; 420 421 } 421 422 ti_bandgap_set_sensor_data(bgp, id, data); 422 423
+2 -7
drivers/tty/n_tty.c
··· 2399 2399 2400 2400 poll_wait(file, &tty->read_wait, wait); 2401 2401 poll_wait(file, &tty->write_wait, wait); 2402 - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 2403 - mask |= POLLHUP; 2404 2402 if (input_available_p(tty, 1)) 2405 2403 mask |= POLLIN | POLLRDNORM; 2406 - else if (mask & POLLHUP) { 2407 - tty_flush_to_ldisc(tty); 2408 - if (input_available_p(tty, 1)) 2409 - mask |= POLLIN | POLLRDNORM; 2410 - } 2411 2404 if (tty->packet && tty->link->ctrl_status) 2412 2405 mask |= POLLPRI | POLLIN | POLLRDNORM; 2406 + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) 2407 + mask |= POLLHUP; 2413 2408 if (tty_hung_up_p(file)) 2414 2409 mask |= POLLHUP; 2415 2410 if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
+26 -3
drivers/tty/serial/8250/8250_pci.c
··· 1815 1815 } 1816 1816 1817 1817 static int 1818 - pci_wch_ch382_setup(struct serial_private *priv, 1818 + pci_wch_ch38x_setup(struct serial_private *priv, 1819 1819 const struct pciserial_board *board, 1820 1820 struct uart_8250_port *port, int idx) 1821 1821 { ··· 1880 1880 1881 1881 #define PCIE_VENDOR_ID_WCH 0x1c00 1882 1882 #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 1883 + #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 1883 1884 1884 1885 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ 1885 1886 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 ··· 2572 2571 .subdevice = PCI_ANY_ID, 2573 2572 .setup = pci_wch_ch353_setup, 2574 2573 }, 2575 - /* WCH CH382 2S1P card (16750 clone) */ 2574 + /* WCH CH382 2S1P card (16850 clone) */ 2576 2575 { 2577 2576 .vendor = PCIE_VENDOR_ID_WCH, 2578 2577 .device = PCIE_DEVICE_ID_WCH_CH382_2S1P, 2579 2578 .subvendor = PCI_ANY_ID, 2580 2579 .subdevice = PCI_ANY_ID, 2581 - .setup = pci_wch_ch382_setup, 2580 + .setup = pci_wch_ch38x_setup, 2581 + }, 2582 + /* WCH CH384 4S card (16850 clone) */ 2583 + { 2584 + .vendor = PCIE_VENDOR_ID_WCH, 2585 + .device = PCIE_DEVICE_ID_WCH_CH384_4S, 2586 + .subvendor = PCI_ANY_ID, 2587 + .subdevice = PCI_ANY_ID, 2588 + .setup = pci_wch_ch38x_setup, 2582 2589 }, 2583 2590 /* 2584 2591 * ASIX devices with FIFO bug ··· 2885 2876 pbn_fintek_4, 2886 2877 pbn_fintek_8, 2887 2878 pbn_fintek_12, 2879 + pbn_wch384_4, 2888 2880 }; 2889 2881 2890 2882 /* ··· 3685 3675 .base_baud = 115200, 3686 3676 .first_offset = 0x40, 3687 3677 }, 3678 + 3679 + [pbn_wch384_4] = { 3680 + .flags = FL_BASE0, 3681 + .num_ports = 4, 3682 + .base_baud = 115200, 3683 + .uart_offset = 8, 3684 + .first_offset = 0xC0, 3685 + }, 3688 3686 }; 3689 3687 3690 3688 static const struct pci_device_id blacklist[] = { ··· 3705 3687 { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ 3706 3688 { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ 3707 3689 { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */ 3690 + { 
PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */ 3708 3691 }; 3709 3692 3710 3693 /* ··· 5418 5399 { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S, 5419 5400 PCI_ANY_ID, PCI_ANY_ID, 5420 5401 0, 0, pbn_b0_bt_2_115200 }, 5402 + 5403 + { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, 5404 + PCI_ANY_ID, PCI_ANY_ID, 5405 + 0, 0, pbn_wch384_4 }, 5421 5406 5422 5407 /* 5423 5408 * Commtech, Inc. Fastcom adapters
+36 -20
drivers/tty/serial/samsung.c
··· 1757 1757 #endif 1758 1758 1759 1759 #if defined(CONFIG_ARCH_EXYNOS) 1760 + #define EXYNOS_COMMON_SERIAL_DRV_DATA \ 1761 + .info = &(struct s3c24xx_uart_info) { \ 1762 + .name = "Samsung Exynos UART", \ 1763 + .type = PORT_S3C6400, \ 1764 + .has_divslot = 1, \ 1765 + .rx_fifomask = S5PV210_UFSTAT_RXMASK, \ 1766 + .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \ 1767 + .rx_fifofull = S5PV210_UFSTAT_RXFULL, \ 1768 + .tx_fifofull = S5PV210_UFSTAT_TXFULL, \ 1769 + .tx_fifomask = S5PV210_UFSTAT_TXMASK, \ 1770 + .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \ 1771 + .def_clk_sel = S3C2410_UCON_CLKSEL0, \ 1772 + .num_clks = 1, \ 1773 + .clksel_mask = 0, \ 1774 + .clksel_shift = 0, \ 1775 + }, \ 1776 + .def_cfg = &(struct s3c2410_uartcfg) { \ 1777 + .ucon = S5PV210_UCON_DEFAULT, \ 1778 + .ufcon = S5PV210_UFCON_DEFAULT, \ 1779 + .has_fracval = 1, \ 1780 + } \ 1781 + 1760 1782 static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = { 1761 - .info = &(struct s3c24xx_uart_info) { 1762 - .name = "Samsung Exynos4 UART", 1763 - .type = PORT_S3C6400, 1764 - .has_divslot = 1, 1765 - .rx_fifomask = S5PV210_UFSTAT_RXMASK, 1766 - .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, 1767 - .rx_fifofull = S5PV210_UFSTAT_RXFULL, 1768 - .tx_fifofull = S5PV210_UFSTAT_TXFULL, 1769 - .tx_fifomask = S5PV210_UFSTAT_TXMASK, 1770 - .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, 1771 - .def_clk_sel = S3C2410_UCON_CLKSEL0, 1772 - .num_clks = 1, 1773 - .clksel_mask = 0, 1774 - .clksel_shift = 0, 1775 - }, 1776 - .def_cfg = &(struct s3c2410_uartcfg) { 1777 - .ucon = S5PV210_UCON_DEFAULT, 1778 - .ufcon = S5PV210_UFCON_DEFAULT, 1779 - .has_fracval = 1, 1780 - }, 1783 + EXYNOS_COMMON_SERIAL_DRV_DATA, 1781 1784 .fifosize = { 256, 64, 16, 16 }, 1782 1785 }; 1786 + 1787 + static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = { 1788 + EXYNOS_COMMON_SERIAL_DRV_DATA, 1789 + .fifosize = { 64, 256, 16, 256 }, 1790 + }; 1791 + 1783 1792 #define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data) 
1793 + #define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data) 1784 1794 #else 1785 1795 #define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL 1796 + #define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL 1786 1797 #endif 1787 1798 1788 1799 static struct platform_device_id s3c24xx_serial_driver_ids[] = { ··· 1815 1804 }, { 1816 1805 .name = "exynos4210-uart", 1817 1806 .driver_data = EXYNOS4210_SERIAL_DRV_DATA, 1807 + }, { 1808 + .name = "exynos5433-uart", 1809 + .driver_data = EXYNOS5433_SERIAL_DRV_DATA, 1818 1810 }, 1819 1811 { }, 1820 1812 }; ··· 1837 1823 .data = (void *)S5PV210_SERIAL_DRV_DATA }, 1838 1824 { .compatible = "samsung,exynos4210-uart", 1839 1825 .data = (void *)EXYNOS4210_SERIAL_DRV_DATA }, 1826 + { .compatible = "samsung,exynos5433-uart", 1827 + .data = (void *)EXYNOS5433_SERIAL_DRV_DATA }, 1840 1828 {}, 1841 1829 }; 1842 1830 MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
+3 -1
drivers/tty/serial/serial_core.c
··· 2164 2164 break; 2165 2165 } 2166 2166 2167 - dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n", 2167 + printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n", 2168 + port->dev ? dev_name(port->dev) : "", 2169 + port->dev ? ": " : "", 2168 2170 drv->dev_name, 2169 2171 drv->tty_driver->name_base + port->line, 2170 2172 address, port->irq, port->uartclk / 16, uart_type(port));
+3 -4
drivers/tty/tty_io.c
··· 1464 1464 driver->subtype == PTY_TYPE_MASTER) 1465 1465 return -EIO; 1466 1466 1467 + if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) 1468 + return -EBUSY; 1469 + 1467 1470 tty->count++; 1468 1471 1469 1472 WARN_ON(!tty->ldisc); ··· 2108 2105 else 2109 2106 retval = -ENODEV; 2110 2107 filp->f_flags = saved_flags; 2111 - 2112 - if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && 2113 - !capable(CAP_SYS_ADMIN)) 2114 - retval = -EBUSY; 2115 2108 2116 2109 if (retval) { 2117 2110 #ifdef TTY_DEBUG_HANGUP
+1 -1
drivers/usb/chipidea/core.c
··· 669 669 if (!ci) 670 670 return -ENOMEM; 671 671 672 - platform_set_drvdata(pdev, ci); 673 672 ci->dev = dev; 674 673 ci->platdata = dev_get_platdata(dev); 675 674 ci->imx28_write_fix = !!(ci->platdata->flags & ··· 782 783 } 783 784 } 784 785 786 + platform_set_drvdata(pdev, ci); 785 787 ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED, 786 788 ci->platdata->name, ci); 787 789 if (ret)
+1
drivers/usb/chipidea/host.c
··· 91 91 if (!hcd) 92 92 return -ENOMEM; 93 93 94 + dev_set_drvdata(ci->dev, ci); 94 95 hcd->rsrc_start = ci->hw_bank.phys; 95 96 hcd->rsrc_len = ci->hw_bank.size; 96 97 hcd->regs = ci->hw_bank.abs;
+7 -3
drivers/usb/dwc2/gadget.c
··· 2567 2567 * s3c_hsotg_ep_disable - disable given endpoint 2568 2568 * @ep: The endpoint to disable. 2569 2569 */ 2570 - static int s3c_hsotg_ep_disable(struct usb_ep *ep) 2570 + static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force) 2571 2571 { 2572 2572 struct s3c_hsotg_ep *hs_ep = our_ep(ep); 2573 2573 struct dwc2_hsotg *hsotg = hs_ep->parent; ··· 2588 2588 2589 2589 spin_lock_irqsave(&hsotg->lock, flags); 2590 2590 /* terminate all requests with shutdown */ 2591 - kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false); 2591 + kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, force); 2592 2592 2593 2593 hsotg->fifo_map &= ~(1<<hs_ep->fifo_index); 2594 2594 hs_ep->fifo_index = 0; ··· 2609 2609 return 0; 2610 2610 } 2611 2611 2612 + static int s3c_hsotg_ep_disable(struct usb_ep *ep) 2613 + { 2614 + return s3c_hsotg_ep_disable_force(ep, false); 2615 + } 2612 2616 /** 2613 2617 * on_list - check request is on the given endpoint 2614 2618 * @ep: The endpoint to check. ··· 2928 2924 2929 2925 /* all endpoints should be shutdown */ 2930 2926 for (ep = 1; ep < hsotg->num_of_eps; ep++) 2931 - s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); 2927 + s3c_hsotg_ep_disable_force(&hsotg->eps[ep].ep, true); 2932 2928 2933 2929 spin_lock_irqsave(&hsotg->lock, flags); 2934 2930
+4
drivers/usb/dwc3/dwc3-pci.c
··· 33 33 #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 34 34 #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e 35 35 #define PCI_DEVICE_ID_INTEL_BSW 0x22B7 36 + #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 37 + #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 36 38 37 39 struct dwc3_pci { 38 40 struct device *dev; ··· 221 219 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), }, 222 220 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, 223 221 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, 222 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, 223 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, 224 224 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 225 225 { } /* Terminating Entry */ 226 226 };
+4 -2
drivers/usb/dwc3/gadget.c
··· 882 882 883 883 if (i == (request->num_mapped_sgs - 1) || 884 884 sg_is_last(s)) { 885 - if (list_is_last(&req->list, 886 - &dep->request_list)) 885 + if (list_empty(&dep->request_list)) 887 886 last_one = true; 888 887 chain = false; 889 888 } ··· 900 901 if (last_one) 901 902 break; 902 903 } 904 + 905 + if (last_one) 906 + break; 903 907 } else { 904 908 dma = req->request.dma; 905 909 length = req->request.length;
+3 -2
drivers/usb/gadget/function/f_hid.c
··· 399 399 value = __le16_to_cpu(ctrl->wValue); 400 400 length = __le16_to_cpu(ctrl->wLength); 401 401 402 - VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x " 403 - "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value); 402 + VDBG(cdev, 403 + "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n", 404 + __func__, ctrl->bRequestType, ctrl->bRequest, value); 404 405 405 406 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { 406 407 case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
+1 -1
drivers/usb/gadget/function/f_midi.c
··· 520 520 req = midi_alloc_ep_req(ep, midi->buflen); 521 521 522 522 if (!req) { 523 - ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n"); 523 + ERROR(midi, "%s: alloc_ep_request failed\n", __func__); 524 524 return; 525 525 } 526 526 req->length = 0;
+1 -1
drivers/usb/gadget/function/f_uac1.c
··· 897 897 struct f_uac1_opts *opts; 898 898 899 899 opts = container_of(f, struct f_uac1_opts, func_inst); 900 - gaudio_cleanup(opts->card); 901 900 if (opts->fn_play_alloc) 902 901 kfree(opts->fn_play); 903 902 if (opts->fn_cap_alloc) ··· 934 935 struct f_audio *audio = func_to_audio(f); 935 936 struct f_uac1_opts *opts; 936 937 938 + gaudio_cleanup(&audio->card); 937 939 opts = container_of(f->fi, struct f_uac1_opts, func_inst); 938 940 kfree(audio); 939 941 mutex_lock(&opts->lock);
+2
drivers/usb/gadget/legacy/inode.c
··· 441 441 kbuf = memdup_user(buf, len); 442 442 if (IS_ERR(kbuf)) { 443 443 value = PTR_ERR(kbuf); 444 + kbuf = NULL; 444 445 goto free1; 445 446 } 446 447 ··· 450 449 data->name, len, (int) value); 451 450 free1: 452 451 mutex_unlock(&data->lock); 452 + kfree (kbuf); 453 453 return value; 454 454 } 455 455
+14 -5
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 716 716 req->using_dma = 1; 717 717 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) 718 718 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE 719 - | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; 719 + | USBA_DMA_END_BUF_EN; 720 720 721 - if (ep->is_in) 722 - req->ctrl |= USBA_DMA_END_BUF_EN; 721 + if (!ep->is_in) 722 + req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; 723 723 724 724 /* 725 725 * Add this request to the queue and submit for DMA if ··· 828 828 { 829 829 struct usba_ep *ep = to_usba_ep(_ep); 830 830 struct usba_udc *udc = ep->udc; 831 - struct usba_request *req = to_usba_req(_req); 831 + struct usba_request *req; 832 832 unsigned long flags; 833 833 u32 status; 834 834 ··· 836 836 ep->ep.name, req); 837 837 838 838 spin_lock_irqsave(&udc->lock, flags); 839 + 840 + list_for_each_entry(req, &ep->queue, queue) { 841 + if (&req->req == _req) 842 + break; 843 + } 844 + 845 + if (&req->req != _req) { 846 + spin_unlock_irqrestore(&udc->lock, flags); 847 + return -EINVAL; 848 + } 839 849 840 850 if (req->using_dma) { 841 851 /* ··· 1573 1563 if ((epstatus & epctrl) & USBA_RX_BK_RDY) { 1574 1564 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name); 1575 1565 receive_data(ep); 1576 - usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); 1577 1566 } 1578 1567 } 1579 1568
+2 -1
drivers/usb/gadget/udc/bdc/bdc_ep.c
··· 718 718 struct bdc *bdc; 719 719 int ret = 0; 720 720 721 - bdc = ep->bdc; 722 721 if (!req || !ep || !ep->usb_ep.desc) 723 722 return -EINVAL; 723 + 724 + bdc = ep->bdc; 724 725 725 726 req->usb_req.actual = 0; 726 727 req->usb_req.status = -EINPROGRESS;
+7 -7
drivers/usb/host/ehci-sched.c
··· 1581 1581 else 1582 1582 next = (now + 2 + 7) & ~0x07; /* full frame cache */ 1583 1583 1584 + /* If needed, initialize last_iso_frame so that this URB will be seen */ 1585 + if (ehci->isoc_count == 0) 1586 + ehci->last_iso_frame = now >> 3; 1587 + 1584 1588 /* 1585 1589 * Use ehci->last_iso_frame as the base. There can't be any 1586 1590 * TDs scheduled for earlier than that. ··· 1604 1600 */ 1605 1601 now2 = (now - base) & (mod - 1); 1606 1602 1607 - /* Is the schedule already full? */ 1603 + /* Is the schedule about to wrap around? */ 1608 1604 if (unlikely(!empty && start < period)) { 1609 - ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n", 1605 + ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n", 1610 1606 urb, stream->next_uframe, base, period, mod); 1611 - status = -ENOSPC; 1607 + status = -EFBIG; 1612 1608 goto fail; 1613 1609 } 1614 1610 ··· 1675 1671 urb->start_frame = start & (mod - 1); 1676 1672 if (!stream->highspeed) 1677 1673 urb->start_frame >>= 3; 1678 - 1679 - /* Make sure scan_isoc() sees these */ 1680 - if (ehci->isoc_count == 0) 1681 - ehci->last_iso_frame = now >> 3; 1682 1674 return status; 1683 1675 1684 1676 fail:
+1 -1
drivers/usb/host/ehci-tegra.c
··· 451 451 452 452 u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0); 453 453 if (IS_ERR(u_phy)) { 454 - err = PTR_ERR(u_phy); 454 + err = -EPROBE_DEFER; 455 455 goto cleanup_clk_en; 456 456 } 457 457 hcd->usb_phy = u_phy;
+15 -3
drivers/usb/host/pci-quirks.c
··· 567 567 { 568 568 void __iomem *base; 569 569 u32 control; 570 - u32 fminterval; 570 + u32 fminterval = 0; 571 + bool no_fminterval = false; 571 572 int cnt; 572 573 573 574 if (!mmio_resource_enabled(pdev, 0)) ··· 577 576 base = pci_ioremap_bar(pdev, 0); 578 577 if (base == NULL) 579 578 return; 579 + 580 + /* 581 + * ULi M5237 OHCI controller locks the whole system when accessing 582 + * the OHCI_FMINTERVAL offset. 583 + */ 584 + if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237) 585 + no_fminterval = true; 580 586 581 587 control = readl(base + OHCI_CONTROL); 582 588 ··· 623 615 } 624 616 625 617 /* software reset of the controller, preserving HcFmInterval */ 626 - fminterval = readl(base + OHCI_FMINTERVAL); 618 + if (!no_fminterval) 619 + fminterval = readl(base + OHCI_FMINTERVAL); 620 + 627 621 writel(OHCI_HCR, base + OHCI_CMDSTATUS); 628 622 629 623 /* reset requires max 10 us delay */ ··· 634 624 break; 635 625 udelay(1); 636 626 } 637 - writel(fminterval, base + OHCI_FMINTERVAL); 627 + 628 + if (!no_fminterval) 629 + writel(fminterval, base + OHCI_FMINTERVAL); 638 630 639 631 /* Now the controller is safely in SUSPEND and nothing can wake it up */ 640 632 iounmap(base);
+2
drivers/usb/host/xhci-pci.c
··· 82 82 "must be suspended extra slowly", 83 83 pdev->revision); 84 84 } 85 + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) 86 + xhci->quirks |= XHCI_BROKEN_STREAMS; 85 87 /* Fresco Logic confirms: all revisions of this chip do not 86 88 * support MSI, even though some of them claim to in their PCI 87 89 * capabilities.
+9
drivers/usb/host/xhci.c
··· 3803 3803 return -EINVAL; 3804 3804 } 3805 3805 3806 + if (setup == SETUP_CONTEXT_ONLY) { 3807 + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3808 + if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3809 + SLOT_STATE_DEFAULT) { 3810 + xhci_dbg(xhci, "Slot already in default state\n"); 3811 + return 0; 3812 + } 3813 + } 3814 + 3806 3815 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3807 3816 if (!command) 3808 3817 return -ENOMEM;
+4
drivers/usb/musb/Kconfig
··· 72 72 73 73 config USB_MUSB_TUSB6010 74 74 tristate "TUSB6010" 75 + depends on ARCH_OMAP2PLUS || COMPILE_TEST 76 + depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules 75 77 76 78 config USB_MUSB_OMAP2PLUS 77 79 tristate "OMAP2430 and onwards" ··· 87 85 config USB_MUSB_DSPS 88 86 tristate "TI DSPS platforms" 89 87 select USB_MUSB_AM335X_CHILD 88 + depends on ARCH_OMAP2PLUS || COMPILE_TEST 90 89 depends on OF_IRQ 91 90 92 91 config USB_MUSB_BLACKFIN ··· 96 93 97 94 config USB_MUSB_UX500 98 95 tristate "Ux500 platforms" 96 + depends on ARCH_U8500 || COMPILE_TEST 99 97 100 98 config USB_MUSB_JZ4740 101 99 tristate "JZ4740"
+1 -1
drivers/usb/musb/blackfin.c
··· 63 63 bfin_write16(addr + offset, data); 64 64 } 65 65 66 - static void binf_writel(void __iomem *addr, unsigned offset, u32 data) 66 + static void bfin_writel(void __iomem *addr, unsigned offset, u32 data) 67 67 { 68 68 bfin_write16(addr + offset, (u16)data); 69 69 }
+2 -2
drivers/usb/musb/musb_cppi41.c
··· 628 628 ret = of_property_read_string_index(np, "dma-names", i, &str); 629 629 if (ret) 630 630 goto err; 631 - if (!strncmp(str, "tx", 2)) 631 + if (strstarts(str, "tx")) 632 632 is_tx = 1; 633 - else if (!strncmp(str, "rx", 2)) 633 + else if (strstarts(str, "rx")) 634 634 is_tx = 0; 635 635 else { 636 636 dev_err(dev, "Wrong dmatype %s\n", str);
+18 -16
drivers/usb/musb/musb_debugfs.c
··· 59 59 { "RxMaxPp", MUSB_RXMAXP, 16 }, 60 60 { "RxCSR", MUSB_RXCSR, 16 }, 61 61 { "RxCount", MUSB_RXCOUNT, 16 }, 62 - { "ConfigData", MUSB_CONFIGDATA,8 }, 63 62 { "IntrRxE", MUSB_INTRRXE, 16 }, 64 63 { "IntrTxE", MUSB_INTRTXE, 16 }, 65 64 { "IntrUsbE", MUSB_INTRUSBE, 8 }, 66 65 { "DevCtl", MUSB_DEVCTL, 8 }, 67 - { "BabbleCtl", MUSB_BABBLE_CTL,8 }, 68 - { "TxFIFOsz", MUSB_TXFIFOSZ, 8 }, 69 - { "RxFIFOsz", MUSB_RXFIFOSZ, 8 }, 70 - { "TxFIFOadd", MUSB_TXFIFOADD, 16 }, 71 - { "RxFIFOadd", MUSB_RXFIFOADD, 16 }, 72 66 { "VControl", 0x68, 32 }, 73 67 { "HWVers", 0x69, 16 }, 74 - { "EPInfo", MUSB_EPINFO, 8 }, 75 - { "RAMInfo", MUSB_RAMINFO, 8 }, 76 68 { "LinkInfo", MUSB_LINKINFO, 8 }, 77 69 { "VPLen", MUSB_VPLEN, 8 }, 78 70 { "HS_EOF1", MUSB_HS_EOF1, 8 }, ··· 95 103 { "DMA_CNTLch7", 0x274, 16 }, 96 104 { "DMA_ADDRch7", 0x278, 32 }, 97 105 { "DMA_COUNTch7", 0x27C, 32 }, 106 + #ifndef CONFIG_BLACKFIN 107 + { "ConfigData", MUSB_CONFIGDATA,8 }, 108 + { "BabbleCtl", MUSB_BABBLE_CTL,8 }, 109 + { "TxFIFOsz", MUSB_TXFIFOSZ, 8 }, 110 + { "RxFIFOsz", MUSB_RXFIFOSZ, 8 }, 111 + { "TxFIFOadd", MUSB_TXFIFOADD, 16 }, 112 + { "RxFIFOadd", MUSB_RXFIFOADD, 16 }, 113 + { "EPInfo", MUSB_EPINFO, 8 }, 114 + { "RAMInfo", MUSB_RAMINFO, 8 }, 115 + #endif 98 116 { } /* Terminating Entry */ 99 117 }; 100 118 ··· 199 197 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 200 198 return -EFAULT; 201 199 202 - if (!strncmp(buf, "force host", 9)) 200 + if (strstarts(buf, "force host")) 203 201 test = MUSB_TEST_FORCE_HOST; 204 202 205 - if (!strncmp(buf, "fifo access", 11)) 203 + if (strstarts(buf, "fifo access")) 206 204 test = MUSB_TEST_FIFO_ACCESS; 207 205 208 - if (!strncmp(buf, "force full-speed", 15)) 206 + if (strstarts(buf, "force full-speed")) 209 207 test = MUSB_TEST_FORCE_FS; 210 208 211 - if (!strncmp(buf, "force high-speed", 15)) 209 + if (strstarts(buf, "force high-speed")) 212 210 test = MUSB_TEST_FORCE_HS; 213 211 214 - if (!strncmp(buf, "test packet", 10)) { 212 + 
if (strstarts(buf, "test packet")) { 215 213 test = MUSB_TEST_PACKET; 216 214 musb_load_testpacket(musb); 217 215 } 218 216 219 - if (!strncmp(buf, "test K", 6)) 217 + if (strstarts(buf, "test K")) 220 218 test = MUSB_TEST_K; 221 219 222 - if (!strncmp(buf, "test J", 6)) 220 + if (strstarts(buf, "test J")) 223 221 test = MUSB_TEST_J; 224 222 225 - if (!strncmp(buf, "test SE0 NAK", 12)) 223 + if (strstarts(buf, "test SE0 NAK")) 226 224 test = MUSB_TEST_SE0_NAK; 227 225 228 226 musb_writeb(musb->mregs, MUSB_TESTMODE, test);
-1
drivers/usb/musb/musb_host.c
··· 2663 2663 if (musb->port_mode == MUSB_PORT_MODE_GADGET) 2664 2664 return; 2665 2665 usb_remove_hcd(musb->hcd); 2666 - musb->hcd = NULL; 2667 2666 } 2668 2667 2669 2668 void musb_host_free(struct musb *musb)
+2 -3
drivers/usb/phy/phy-mv-usb.c
··· 338 338 static void mv_otg_update_state(struct mv_otg *mvotg) 339 339 { 340 340 struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl; 341 - struct usb_phy *phy = &mvotg->phy; 342 341 int old_state = mvotg->phy.otg->state; 343 342 344 343 switch (old_state) { ··· 857 858 { 858 859 struct mv_otg *mvotg = platform_get_drvdata(pdev); 859 860 860 - if (mvotg->phy.state != OTG_STATE_B_IDLE) { 861 + if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) { 861 862 dev_info(&pdev->dev, 862 863 "OTG state is not B_IDLE, it is %d!\n", 863 - mvotg->phy.state); 864 + mvotg->phy.otg->state); 864 865 return -EAGAIN; 865 866 } 866 867
+11 -5
drivers/usb/phy/phy.c
··· 34 34 return phy; 35 35 } 36 36 37 - return ERR_PTR(-ENODEV); 37 + return ERR_PTR(-EPROBE_DEFER); 38 38 } 39 39 40 40 static struct usb_phy *__usb_find_phy_dev(struct device *dev, ··· 59 59 { 60 60 struct usb_phy *phy; 61 61 62 + if (!of_device_is_available(node)) 63 + return ERR_PTR(-ENODEV); 64 + 62 65 list_for_each_entry(phy, &phy_list, head) { 63 66 if (node != phy->dev->of_node) 64 67 continue; ··· 69 66 return phy; 70 67 } 71 68 72 - return ERR_PTR(-ENODEV); 69 + return ERR_PTR(-EPROBE_DEFER); 73 70 } 74 71 75 72 static void devm_usb_phy_release(struct device *dev, void *res) ··· 193 190 spin_lock_irqsave(&phy_lock, flags); 194 191 195 192 phy = __of_usb_find_phy(node); 196 - if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 197 - if (!IS_ERR(phy)) 198 - phy = ERR_PTR(-EPROBE_DEFER); 193 + if (IS_ERR(phy)) { 194 + devres_free(ptr); 195 + goto err1; 196 + } 199 197 198 + if (!try_module_get(phy->dev->driver->owner)) { 199 + phy = ERR_PTR(-ENODEV); 200 200 devres_free(ptr); 201 201 goto err1; 202 202 }
+11 -5
drivers/usb/serial/console.c
··· 46 46 * ------------------------------------------------------------ 47 47 */ 48 48 49 + static const struct tty_operations usb_console_fake_tty_ops = { 50 + }; 49 51 50 52 /* 51 53 * The parsing of the command line works exactly like the ··· 139 137 goto reset_open_count; 140 138 } 141 139 kref_init(&tty->kref); 142 - tty_port_tty_set(&port->port, tty); 143 140 tty->driver = usb_serial_tty_driver; 144 141 tty->index = co->index; 142 + init_ldsem(&tty->ldisc_sem); 143 + INIT_LIST_HEAD(&tty->tty_files); 144 + kref_get(&tty->driver->kref); 145 + tty->ops = &usb_console_fake_tty_ops; 145 146 if (tty_init_termios(tty)) { 146 147 retval = -ENOMEM; 147 - goto free_tty; 148 + goto put_tty; 148 149 } 150 + tty_port_tty_set(&port->port, tty); 149 151 } 150 152 151 153 /* only call the device specific open if this ··· 167 161 serial->type->set_termios(tty, port, &dummy); 168 162 169 163 tty_port_tty_set(&port->port, NULL); 170 - kfree(tty); 164 + tty_kref_put(tty); 171 165 } 172 166 set_bit(ASYNCB_INITIALIZED, &port->port.flags); 173 167 } ··· 183 177 184 178 fail: 185 179 tty_port_tty_set(&port->port, NULL); 186 - free_tty: 187 - kfree(tty); 180 + put_tty: 181 + tty_kref_put(tty); 188 182 reset_open_count: 189 183 port->port.count = 0; 190 184 usb_autopm_put_interface(serial->interface);
+3 -1
drivers/usb/serial/cp210x.c
··· 120 120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 121 121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 122 122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 123 - { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ 123 + { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */ 124 + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ 124 125 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 125 126 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 126 127 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 128 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ 127 129 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 128 130 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 129 131 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+2 -2
drivers/usb/serial/generic.c
··· 286 286 287 287 res = usb_submit_urb(port->read_urbs[index], mem_flags); 288 288 if (res) { 289 - if (res != -EPERM) { 289 + if (res != -EPERM && res != -ENODEV) { 290 290 dev_err(&port->dev, 291 291 "%s - usb_submit_urb failed: %d\n", 292 292 __func__, res); ··· 373 373 __func__, urb->status); 374 374 return; 375 375 default: 376 - dev_err(&port->dev, "%s - nonzero urb status: %d\n", 376 + dev_dbg(&port->dev, "%s - nonzero urb status: %d\n", 377 377 __func__, urb->status); 378 378 goto resubmit; 379 379 }
+15 -5
drivers/usb/serial/keyspan.c
··· 410 410 } 411 411 port = serial->port[msg->port]; 412 412 p_priv = usb_get_serial_port_data(port); 413 + if (!p_priv) 414 + goto resubmit; 413 415 414 416 /* Update handshaking pin state information */ 415 417 old_dcd_state = p_priv->dcd_state; ··· 422 420 423 421 if (old_dcd_state != p_priv->dcd_state) 424 422 tty_port_tty_hangup(&port->port, true); 425 - 423 + resubmit: 426 424 /* Resubmit urb so we continue receiving */ 427 425 err = usb_submit_urb(urb, GFP_ATOMIC); 428 426 if (err != 0) ··· 529 527 } 530 528 port = serial->port[msg->port]; 531 529 p_priv = usb_get_serial_port_data(port); 530 + if (!p_priv) 531 + goto resubmit; 532 532 533 533 /* Update handshaking pin state information */ 534 534 old_dcd_state = p_priv->dcd_state; ··· 541 537 542 538 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 543 539 tty_port_tty_hangup(&port->port, true); 544 - 540 + resubmit: 545 541 /* Resubmit urb so we continue receiving */ 546 542 err = usb_submit_urb(urb, GFP_ATOMIC); 547 543 if (err != 0) ··· 611 607 } 612 608 port = serial->port[msg->portNumber]; 613 609 p_priv = usb_get_serial_port_data(port); 610 + if (!p_priv) 611 + goto resubmit; 614 612 615 613 /* Update handshaking pin state information */ 616 614 old_dcd_state = p_priv->dcd_state; ··· 623 617 624 618 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 625 619 tty_port_tty_hangup(&port->port, true); 626 - 620 + resubmit: 627 621 /* Resubmit urb so we continue receiving */ 628 622 err = usb_submit_urb(urb, GFP_ATOMIC); 629 623 if (err != 0) ··· 861 855 862 856 port = serial->port[0]; 863 857 p_priv = usb_get_serial_port_data(port); 858 + if (!p_priv) 859 + goto resubmit; 864 860 865 861 /* Update handshaking pin state information */ 866 862 old_dcd_state = p_priv->dcd_state; ··· 873 865 874 866 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 875 867 tty_port_tty_hangup(&port->port, true); 876 - 868 + resubmit: 877 869 /* Resubmit urb so we continue receiving */ 878 870 err = 
usb_submit_urb(urb, GFP_ATOMIC); 879 871 if (err != 0) ··· 934 926 935 927 port = serial->port[msg->port]; 936 928 p_priv = usb_get_serial_port_data(port); 929 + if (!p_priv) 930 + goto resubmit; 937 931 938 932 /* Update handshaking pin state information */ 939 933 old_dcd_state = p_priv->dcd_state; ··· 944 934 945 935 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 946 936 tty_port_tty_hangup(&port->port, true); 947 - 937 + resubmit: 948 938 /* Resubmit urb so we continue receiving */ 949 939 err = usb_submit_urb(urb, GFP_ATOMIC); 950 940 if (err != 0)
+10 -1
drivers/usb/serial/option.c
··· 234 234 235 235 #define QUALCOMM_VENDOR_ID 0x05C6 236 236 237 + #define SIERRA_VENDOR_ID 0x1199 238 + 237 239 #define CMOTECH_VENDOR_ID 0x16d8 238 240 #define CMOTECH_PRODUCT_6001 0x6001 239 241 #define CMOTECH_PRODUCT_CMU_300 0x6002 ··· 514 512 OPTION_BLACKLIST_RESERVED_IF = 2 515 513 }; 516 514 517 - #define MAX_BL_NUM 8 515 + #define MAX_BL_NUM 11 518 516 struct option_blacklist_info { 519 517 /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */ 520 518 const unsigned long sendsetup; ··· 601 599 static const struct option_blacklist_info telit_le920_blacklist = { 602 600 .sendsetup = BIT(0), 603 601 .reserved = BIT(1) | BIT(5), 602 + }; 603 + 604 + static const struct option_blacklist_info sierra_mc73xx_blacklist = { 605 + .sendsetup = BIT(0) | BIT(2), 606 + .reserved = BIT(8) | BIT(10) | BIT(11), 604 607 }; 605 608 606 609 static const struct usb_device_id option_ids[] = { ··· 1105 1098 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1106 1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1107 1100 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1101 + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), 1102 + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ 1108 1103 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1109 1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1110 1105 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
-1
drivers/usb/serial/qcserial.c
··· 142 142 {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */ 143 143 {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */ 144 144 {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ 145 - {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */ 146 145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ 147 146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ 148 147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
+28 -5
drivers/usb/storage/uas-detect.h
··· 69 69 return 0; 70 70 71 71 /* 72 - * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is 73 - * broken on the ASM1051, use the number of streams to differentiate. 74 - * New ASM1053-s also support 32 streams, but have a different prod-id. 72 + * ASMedia has a number of usb3 to sata bridge chips, at the time of 73 + * this writing the following versions exist: 74 + * ASM1051 - no uas support version 75 + * ASM1051 - with broken (*) uas support 76 + * ASM1053 - with working uas support 77 + * ASM1153 - with working uas support 78 + * 79 + * Devices with these chips re-use a number of device-ids over the 80 + * entire line, so the device-id is useless to determine if we're 81 + * dealing with an ASM1051 (which we want to avoid). 82 + * 83 + * The ASM1153 can be identified by config.MaxPower == 0, 84 + * where as the ASM105x models have config.MaxPower == 36. 85 + * 86 + * Differentiating between the ASM1053 and ASM1051 is trickier, when 87 + * connected over USB-3 we can look at the number of streams supported, 88 + * ASM1051 supports 32 streams, where as early ASM1053 versions support 89 + * 16 streams, newer ASM1053-s also support 32 streams, but have a 90 + * different prod-id. 
91 + * 92 + * (*) ASM1051 chips do work with UAS with some disks (with the 93 + * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks 75 94 */ 76 95 if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && 77 - le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) { 78 - if (udev->speed < USB_SPEED_SUPER) { 96 + (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 || 97 + le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) { 98 + if (udev->actconfig->desc.bMaxPower == 0) { 99 + /* ASM1153, do nothing */ 100 + } else if (udev->speed < USB_SPEED_SUPER) { 79 101 /* No streams info, assume ASM1051 */ 80 102 flags |= US_FL_IGNORE_UAS; 81 103 } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { 104 + /* Possibly an ASM1051, disable uas */ 82 105 flags |= US_FL_IGNORE_UAS; 83 106 } 84 107 }
+38 -8
drivers/usb/storage/unusual_uas.h
··· 40 40 * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> 41 41 */ 42 42 43 + /* 44 + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI 45 + * commands in UAS mode. Observed with the 1.28 firmware; are there others? 46 + */ 47 + UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128, 48 + "Apricorn", 49 + "", 50 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 51 + US_FL_IGNORE_UAS), 52 + 43 53 /* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ 44 54 UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999, 45 55 "Seagate", ··· 78 68 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 79 69 US_FL_NO_ATA_1X), 80 70 71 + /* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */ 72 + UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999, 73 + "Seagate", 74 + "Backup Plus", 75 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 76 + US_FL_NO_ATA_1X), 77 + 78 + /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 79 + UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999, 80 + "Seagate", 81 + "Backup Plus Desk", 82 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 83 + US_FL_NO_ATA_1X), 84 + 81 85 /* https://bbs.archlinux.org/viewtopic.php?id=183190 */ 82 86 UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999, 83 87 "Seagate", ··· 106 82 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 107 83 US_FL_NO_ATA_1X), 108 84 85 + /* Reported-by: G. 
Richard Bellamy <rbellamy@pteradigm.com> */ 86 + UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, 87 + "Seagate", 88 + "BUP Fast HDD", 89 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 90 + US_FL_NO_ATA_1X), 91 + 109 92 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ 110 93 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, 111 94 "JMicron", ··· 120 89 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 121 90 US_FL_NO_REPORT_OPCODES), 122 91 123 - /* Most ASM1051 based devices have issues with uas, blacklist them all */ 124 - /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 125 - UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999, 126 - "ASMedia", 127 - "ASM1051", 128 - USB_SC_DEVICE, USB_PR_DEVICE, NULL, 129 - US_FL_IGNORE_UAS), 130 - 131 92 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 132 93 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 133 94 "VIA", 134 95 "VL711", 135 96 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 136 97 US_FL_NO_ATA_1X), 98 + 99 + /* Reported-by: Takeo Nakayama <javhera@gmx.com> */ 100 + UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, 101 + "JMicron", 102 + "JMS566", 103 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 104 + US_FL_NO_REPORT_OPCODES), 137 105 138 106 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 139 107 UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
+1 -3
drivers/vfio/pci/vfio_pci.c
··· 840 840 841 841 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 842 842 { 843 - u8 type; 844 843 struct vfio_pci_device *vdev; 845 844 struct iommu_group *group; 846 845 int ret; 847 846 848 - pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type); 849 - if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) 847 + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) 850 848 return -EINVAL; 851 849 852 850 group = iommu_group_get(&pdev->dev);
+1 -1
drivers/vhost/net.c
··· 538 538 ++headcount; 539 539 seg += in; 540 540 } 541 - heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen); 541 + heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); 542 542 *iovcount = seg; 543 543 if (unlikely(log)) 544 544 *log_num = nlogs;
+21 -3
drivers/vhost/scsi.c
··· 911 911 return 0; 912 912 } 913 913 914 + static int vhost_scsi_to_tcm_attr(int attr) 915 + { 916 + switch (attr) { 917 + case VIRTIO_SCSI_S_SIMPLE: 918 + return TCM_SIMPLE_TAG; 919 + case VIRTIO_SCSI_S_ORDERED: 920 + return TCM_ORDERED_TAG; 921 + case VIRTIO_SCSI_S_HEAD: 922 + return TCM_HEAD_TAG; 923 + case VIRTIO_SCSI_S_ACA: 924 + return TCM_ACA_TAG; 925 + default: 926 + break; 927 + } 928 + return TCM_SIMPLE_TAG; 929 + } 930 + 914 931 static void tcm_vhost_submission_work(struct work_struct *work) 915 932 { 916 933 struct tcm_vhost_cmd *cmd = ··· 953 936 rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, 954 937 cmd->tvc_cdb, &cmd->tvc_sense_buf[0], 955 938 cmd->tvc_lun, cmd->tvc_exp_data_len, 956 - cmd->tvc_task_attr, cmd->tvc_data_direction, 957 - TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, 958 - NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); 939 + vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), 940 + cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, 941 + sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, 942 + cmd->tvc_prot_sgl_count); 959 943 if (rc < 0) { 960 944 transport_send_check_condition_and_sense(se_cmd, 961 945 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+7 -3
drivers/vhost/vhost.c
··· 713 713 r = -EFAULT; 714 714 break; 715 715 } 716 - if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) || 717 - (a.used_user_addr & (sizeof *vq->used->ring - 1)) || 718 - (a.log_guest_addr & (sizeof *vq->used->ring - 1))) { 716 + 717 + /* Make sure it's safe to cast pointers to vring types. */ 718 + BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); 719 + BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); 720 + if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || 721 + (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || 722 + (a.log_guest_addr & (sizeof(u64) - 1))) { 719 723 r = -EINVAL; 720 724 break; 721 725 }
+5 -3
drivers/video/fbdev/broadsheetfb.c
··· 636 636 err = broadsheet_spiflash_read_range(par, start_sector_addr, 637 637 data_start_addr, sector_buffer); 638 638 if (err) 639 - return err; 639 + goto out; 640 640 } 641 641 642 642 /* now we copy our data into the right place in the sector buffer */ ··· 657 657 err = broadsheet_spiflash_read_range(par, tail_start_addr, 658 658 tail_len, sector_buffer + tail_start_addr); 659 659 if (err) 660 - return err; 660 + goto out; 661 661 } 662 662 663 663 /* if we got here we have the full sector that we want to rewrite. */ ··· 665 665 /* first erase the sector */ 666 666 err = broadsheet_spiflash_erase_sector(par, start_sector_addr); 667 667 if (err) 668 - return err; 668 + goto out; 669 669 670 670 /* now write it */ 671 671 err = broadsheet_spiflash_write_sector(par, start_sector_addr, 672 672 sector_buffer, sector_size); 673 + out: 674 + kfree(sector_buffer); 673 675 return err; 674 676 } 675 677
+3 -2
drivers/video/fbdev/core/fb_defio.c
··· 83 83 cancel_delayed_work_sync(&info->deferred_work); 84 84 85 85 /* Run it immediately */ 86 - err = schedule_delayed_work(&info->deferred_work, 0); 86 + schedule_delayed_work(&info->deferred_work, 0); 87 87 mutex_unlock(&inode->i_mutex); 88 - return err; 88 + 89 + return 0; 89 90 } 90 91 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); 91 92
-2
drivers/video/fbdev/omap2/dss/hdmi_pll.c
··· 132 132 .mX_max = 127, 133 133 .fint_min = 500000, 134 134 .fint_max = 2500000, 135 - .clkdco_max = 1800000000, 136 135 137 136 .clkdco_min = 500000000, 138 137 .clkdco_low = 1000000000, ··· 155 156 .mX_max = 127, 156 157 .fint_min = 620000, 157 158 .fint_max = 2500000, 158 - .clkdco_max = 1800000000, 159 159 160 160 .clkdco_min = 750000000, 161 161 .clkdco_low = 1500000000,
+2 -1
drivers/video/fbdev/omap2/dss/pll.c
··· 97 97 return 0; 98 98 99 99 err_enable: 100 - regulator_disable(pll->regulator); 100 + if (pll->regulator) 101 + regulator_disable(pll->regulator); 101 102 err_reg: 102 103 clk_disable_unprepare(pll->clkin); 103 104 return r;
+2
drivers/video/fbdev/omap2/dss/sdi.c
··· 342 342 out->output_type = OMAP_DISPLAY_TYPE_SDI; 343 343 out->name = "sdi.0"; 344 344 out->dispc_channel = OMAP_DSS_CHANNEL_LCD; 345 + /* We have SDI only on OMAP3, where it's on port 1 */ 346 + out->port_num = 1; 345 347 out->ops.sdi = &sdi_ops; 346 348 out->owner = THIS_MODULE; 347 349
+1 -1
drivers/video/fbdev/simplefb.c
··· 402 402 if (ret) 403 403 return ret; 404 404 405 - if (IS_ENABLED(CONFIG_OF) && of_chosen) { 405 + if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) { 406 406 for_each_child_of_node(of_chosen, np) { 407 407 if (of_device_is_compatible(np, "simple-framebuffer")) 408 408 of_platform_device_create(np, NULL, NULL);
+16 -1
drivers/video/logo/logo.c
··· 21 21 module_param(nologo, bool, 0); 22 22 MODULE_PARM_DESC(nologo, "Disables startup logo"); 23 23 24 + /* 25 + * Logos are located in the initdata, and will be freed in kernel_init. 26 + * Use late_init to mark the logos as freed to prevent any further use. 27 + */ 28 + 29 + static bool logos_freed; 30 + 31 + static int __init fb_logo_late_init(void) 32 + { 33 + logos_freed = true; 34 + return 0; 35 + } 36 + 37 + late_initcall(fb_logo_late_init); 38 + 24 39 /* logo's are marked __initdata. Use __init_refok to tell 25 40 * modpost that it is intended that this function uses data 26 41 * marked __initdata. ··· 44 29 { 45 30 const struct linux_logo *logo = NULL; 46 31 47 - if (nologo) 32 + if (nologo || logos_freed) 48 33 return NULL; 49 34 50 35 if (depth >= 1) {
+1 -9
drivers/virtio/virtio_pci_common.c
··· 282 282 283 283 vp_free_vectors(vdev); 284 284 kfree(vp_dev->vqs); 285 + vp_dev->vqs = NULL; 285 286 } 286 287 287 288 static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, ··· 420 419 } 421 420 } 422 421 return 0; 423 - } 424 - 425 - void virtio_pci_release_dev(struct device *_d) 426 - { 427 - /* 428 - * No need for a release method as we allocate/free 429 - * all devices together with the pci devices. 430 - * Provide an empty one to avoid getting a warning from core. 431 - */ 432 422 } 433 423 434 424 #ifdef CONFIG_PM_SLEEP
-1
drivers/virtio/virtio_pci_common.h
··· 126 126 * - ignore the affinity request if we're using INTX 127 127 */ 128 128 int vp_set_vq_affinity(struct virtqueue *vq, int cpu); 129 - void virtio_pci_release_dev(struct device *); 130 129 131 130 int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 132 131 const struct pci_device_id *id);
+11 -1
drivers/virtio/virtio_pci_legacy.c
··· 211 211 .set_vq_affinity = vp_set_vq_affinity, 212 212 }; 213 213 214 + static void virtio_pci_release_dev(struct device *_d) 215 + { 216 + struct virtio_device *vdev = dev_to_virtio(_d); 217 + struct virtio_pci_device *vp_dev = to_vp_device(vdev); 218 + 219 + /* As struct device is a kobject, it's not safe to 220 + * free the memory (including the reference counter itself) 221 + * until it's release callback. */ 222 + kfree(vp_dev); 223 + } 224 + 214 225 /* the PCI probing function */ 215 226 int virtio_pci_legacy_probe(struct pci_dev *pci_dev, 216 227 const struct pci_device_id *id) ··· 313 302 pci_iounmap(pci_dev, vp_dev->ioaddr); 314 303 pci_release_regions(pci_dev); 315 304 pci_disable_device(pci_dev); 316 - kfree(vp_dev); 317 305 }
+10 -3
fs/btrfs/backref.c
··· 1552 1552 { 1553 1553 int ret; 1554 1554 int type; 1555 - struct btrfs_tree_block_info *info; 1556 1555 struct btrfs_extent_inline_ref *eiref; 1557 1556 1558 1557 if (*ptr == (unsigned long)-1) ··· 1572 1573 } 1573 1574 1574 1575 /* we can treat both ref types equally here */ 1575 - info = (struct btrfs_tree_block_info *)(ei + 1); 1576 1576 *out_root = btrfs_extent_inline_ref_offset(eb, eiref); 1577 - *out_level = btrfs_tree_block_level(eb, info); 1577 + 1578 + if (key->type == BTRFS_EXTENT_ITEM_KEY) { 1579 + struct btrfs_tree_block_info *info; 1580 + 1581 + info = (struct btrfs_tree_block_info *)(ei + 1); 1582 + *out_level = btrfs_tree_block_level(eb, info); 1583 + } else { 1584 + ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); 1585 + *out_level = (u8)key->offset; 1586 + } 1578 1587 1579 1588 if (ret == 1) 1580 1589 *ptr = (unsigned long)-1;
+8
fs/btrfs/delayed-inode.c
··· 1857 1857 { 1858 1858 struct btrfs_delayed_node *delayed_node; 1859 1859 1860 + /* 1861 + * we don't do delayed inode updates during log recovery because it 1862 + * leads to enospc problems. This means we also can't do 1863 + * delayed inode refs 1864 + */ 1865 + if (BTRFS_I(inode)->root->fs_info->log_root_recovering) 1866 + return -EAGAIN; 1867 + 1860 1868 delayed_node = btrfs_get_or_create_delayed_node(inode); 1861 1869 if (IS_ERR(delayed_node)) 1862 1870 return PTR_ERR(delayed_node);
+6 -6
fs/btrfs/extent-tree.c
··· 3139 3139 struct extent_buffer *leaf; 3140 3140 3141 3141 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 3142 - if (ret < 0) 3142 + if (ret) { 3143 + if (ret > 0) 3144 + ret = -ENOENT; 3143 3145 goto fail; 3144 - BUG_ON(ret); /* Corruption */ 3146 + } 3145 3147 3146 3148 leaf = path->nodes[0]; 3147 3149 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); ··· 3151 3149 btrfs_mark_buffer_dirty(leaf); 3152 3150 btrfs_release_path(path); 3153 3151 fail: 3154 - if (ret) { 3152 + if (ret) 3155 3153 btrfs_abort_transaction(trans, root, ret); 3156 - return ret; 3157 - } 3158 - return 0; 3154 + return ret; 3159 3155 3160 3156 } 3161 3157
+3 -1
fs/btrfs/inode.c
··· 6255 6255 6256 6256 out_fail: 6257 6257 btrfs_end_transaction(trans, root); 6258 - if (drop_on_err) 6258 + if (drop_on_err) { 6259 + inode_dec_link_count(inode); 6259 6260 iput(inode); 6261 + } 6260 6262 btrfs_balance_delayed_items(root); 6261 6263 btrfs_btree_balance_dirty(root); 6262 6264 return err;
+1 -1
fs/btrfs/scrub.c
··· 2607 2607 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, 2608 2608 flags, gen, mirror_num, 2609 2609 have_csum ? csum : NULL); 2610 - skip: 2611 2610 if (ret) 2612 2611 return ret; 2612 + skip: 2613 2613 len -= l; 2614 2614 logical += l; 2615 2615 physical += l;
+1 -1
fs/ceph/addr.c
··· 1416 1416 } 1417 1417 } 1418 1418 1419 - dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n", 1419 + dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", 1420 1420 inode, ceph_vinop(inode), len, locked_page); 1421 1421 1422 1422 if (len > 0) {
+3 -3
fs/cifs/cifsglob.h
··· 661 661 server->ops->set_credits(server, val); 662 662 } 663 663 664 - static inline __u64 664 + static inline __le64 665 665 get_next_mid64(struct TCP_Server_Info *server) 666 666 { 667 - return server->ops->get_next_mid(server); 667 + return cpu_to_le64(server->ops->get_next_mid(server)); 668 668 } 669 669 670 670 static inline __le16 671 671 get_next_mid(struct TCP_Server_Info *server) 672 672 { 673 - __u16 mid = get_next_mid64(server); 673 + __u16 mid = server->ops->get_next_mid(server); 674 674 /* 675 675 * The value in the SMB header should be little endian for easy 676 676 * on-the-wire decoding.
+7 -5
fs/cifs/netmisc.c
··· 926 926 927 927 /* Subtract the NTFS time offset, then convert to 1s intervals. */ 928 928 s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET; 929 + u64 abs_t; 929 930 930 931 /* 931 932 * Unfortunately can not use normal 64 bit division on 32 bit arch, but ··· 934 933 * to special case them 935 934 */ 936 935 if (t < 0) { 937 - t = -t; 938 - ts.tv_nsec = (long)(do_div(t, 10000000) * 100); 936 + abs_t = -t; 937 + ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100); 939 938 ts.tv_nsec = -ts.tv_nsec; 940 - ts.tv_sec = -t; 939 + ts.tv_sec = -abs_t; 941 940 } else { 942 - ts.tv_nsec = (long)do_div(t, 10000000) * 100; 943 - ts.tv_sec = t; 941 + abs_t = t; 942 + ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100; 943 + ts.tv_sec = abs_t; 944 944 } 945 945 946 946 return ts;
+7 -3
fs/cifs/readdir.c
··· 69 69 * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT 70 70 * 71 71 * Find the dentry that matches "name". If there isn't one, create one. If it's 72 - * a negative dentry or the uniqueid changed, then drop it and recreate it. 72 + * a negative dentry or the uniqueid or filetype(mode) changed, 73 + * then drop it and recreate it. 73 74 */ 74 75 static void 75 76 cifs_prime_dcache(struct dentry *parent, struct qstr *name, ··· 98 97 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) 99 98 fattr->cf_uniqueid = CIFS_I(inode)->uniqueid; 100 99 101 - /* update inode in place if i_ino didn't change */ 102 - if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) { 100 + /* update inode in place 101 + * if both i_ino and i_mode didn't change */ 102 + if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid && 103 + (inode->i_mode & S_IFMT) == 104 + (fattr->cf_mode & S_IFMT)) { 103 105 cifs_fattr_to_inode(inode, fattr); 104 106 goto out; 105 107 }
+7 -5
fs/cifs/smb2misc.c
··· 32 32 static int 33 33 check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid) 34 34 { 35 + __u64 wire_mid = le64_to_cpu(hdr->MessageId); 36 + 35 37 /* 36 38 * Make sure that this really is an SMB, that it is a response, 37 39 * and that the message ids match. 38 40 */ 39 41 if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) && 40 - (mid == hdr->MessageId)) { 42 + (mid == wire_mid)) { 41 43 if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) 42 44 return 0; 43 45 else { ··· 53 51 if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER) 54 52 cifs_dbg(VFS, "Bad protocol string signature header %x\n", 55 53 *(unsigned int *) hdr->ProtocolId); 56 - if (mid != hdr->MessageId) 54 + if (mid != wire_mid) 57 55 cifs_dbg(VFS, "Mids do not match: %llu and %llu\n", 58 - mid, hdr->MessageId); 56 + mid, wire_mid); 59 57 } 60 - cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", hdr->MessageId); 58 + cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid); 61 59 return 1; 62 60 } 63 61 ··· 97 95 { 98 96 struct smb2_hdr *hdr = (struct smb2_hdr *)buf; 99 97 struct smb2_pdu *pdu = (struct smb2_pdu *)hdr; 100 - __u64 mid = hdr->MessageId; 98 + __u64 mid = le64_to_cpu(hdr->MessageId); 101 99 __u32 len = get_rfc1002_length(buf); 102 100 __u32 clc_len; /* calculated length */ 103 101 int command;
+2 -1
fs/cifs/smb2ops.c
··· 176 176 { 177 177 struct mid_q_entry *mid; 178 178 struct smb2_hdr *hdr = (struct smb2_hdr *)buf; 179 + __u64 wire_mid = le64_to_cpu(hdr->MessageId); 179 180 180 181 spin_lock(&GlobalMid_Lock); 181 182 list_for_each_entry(mid, &server->pending_mid_q, qhead) { 182 - if ((mid->mid == hdr->MessageId) && 183 + if ((mid->mid == wire_mid) && 183 184 (mid->mid_state == MID_REQUEST_SUBMITTED) && 184 185 (mid->command == hdr->Command)) { 185 186 spin_unlock(&GlobalMid_Lock);
+1 -1
fs/cifs/smb2pdu.h
··· 110 110 __le16 CreditRequest; /* CreditResponse */ 111 111 __le32 Flags; 112 112 __le32 NextCommand; 113 - __u64 MessageId; /* opaque - so can stay little endian */ 113 + __le64 MessageId; 114 114 __le32 ProcessId; 115 115 __u32 TreeId; /* opaque - so do not make little endian */ 116 116 __u64 SessionId; /* opaque - so do not make little endian */
+1 -1
fs/cifs/smb2transport.c
··· 490 490 return temp; 491 491 else { 492 492 memset(temp, 0, sizeof(struct mid_q_entry)); 493 - temp->mid = smb_buffer->MessageId; /* always LE */ 493 + temp->mid = le64_to_cpu(smb_buffer->MessageId); 494 494 temp->pid = current->pid; 495 495 temp->command = smb_buffer->Command; /* Always LE */ 496 496 temp->when_alloc = jiffies;
+2 -2
fs/ext4/extents.c
··· 5166 5166 5167 5167 /* fallback to generic here if not in extents fmt */ 5168 5168 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5169 - return __generic_block_fiemap(inode, fieinfo, start, len, 5170 - ext4_get_block); 5169 + return generic_block_fiemap(inode, fieinfo, start, len, 5170 + ext4_get_block); 5171 5171 5172 5172 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 5173 5173 return -EBADR;
+116 -108
fs/ext4/file.c
··· 273 273 * we determine this extent as a data or a hole according to whether the 274 274 * page cache has data or not. 275 275 */ 276 - static int ext4_find_unwritten_pgoff(struct inode *inode, int whence, 277 - loff_t endoff, loff_t *offset) 276 + static int ext4_find_unwritten_pgoff(struct inode *inode, 277 + int whence, 278 + struct ext4_map_blocks *map, 279 + loff_t *offset) 278 280 { 279 281 struct pagevec pvec; 282 + unsigned int blkbits; 280 283 pgoff_t index; 281 284 pgoff_t end; 285 + loff_t endoff; 282 286 loff_t startoff; 283 287 loff_t lastoff; 284 288 int found = 0; 285 289 290 + blkbits = inode->i_sb->s_blocksize_bits; 286 291 startoff = *offset; 287 292 lastoff = startoff; 288 - 293 + endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; 289 294 290 295 index = startoff >> PAGE_CACHE_SHIFT; 291 296 end = endoff >> PAGE_CACHE_SHIFT; ··· 408 403 static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) 409 404 { 410 405 struct inode *inode = file->f_mapping->host; 411 - struct fiemap_extent_info fie; 412 - struct fiemap_extent ext[2]; 413 - loff_t next; 414 - int i, ret = 0; 406 + struct ext4_map_blocks map; 407 + struct extent_status es; 408 + ext4_lblk_t start, last, end; 409 + loff_t dataoff, isize; 410 + int blkbits; 411 + int ret = 0; 415 412 416 413 mutex_lock(&inode->i_mutex); 417 - if (offset >= inode->i_size) { 414 + 415 + isize = i_size_read(inode); 416 + if (offset >= isize) { 418 417 mutex_unlock(&inode->i_mutex); 419 418 return -ENXIO; 420 419 } 421 - fie.fi_flags = 0; 422 - fie.fi_extents_max = 2; 423 - fie.fi_extents_start = (struct fiemap_extent __user *) &ext; 424 - while (1) { 425 - mm_segment_t old_fs = get_fs(); 426 420 427 - fie.fi_extents_mapped = 0; 428 - memset(ext, 0, sizeof(*ext) * fie.fi_extents_max); 421 + blkbits = inode->i_sb->s_blocksize_bits; 422 + start = offset >> blkbits; 423 + last = start; 424 + end = isize >> blkbits; 425 + dataoff = offset; 429 426 430 - set_fs(get_ds()); 431 - ret = 
ext4_fiemap(inode, &fie, offset, maxsize - offset); 432 - set_fs(old_fs); 433 - if (ret) 434 - break; 435 - 436 - /* No extents found, EOF */ 437 - if (!fie.fi_extents_mapped) { 438 - ret = -ENXIO; 427 + do { 428 + map.m_lblk = last; 429 + map.m_len = end - last + 1; 430 + ret = ext4_map_blocks(NULL, inode, &map, 0); 431 + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { 432 + if (last != start) 433 + dataoff = (loff_t)last << blkbits; 439 434 break; 440 435 } 441 - for (i = 0; i < fie.fi_extents_mapped; i++) { 442 - next = (loff_t)(ext[i].fe_length + ext[i].fe_logical); 443 436 444 - if (offset < (loff_t)ext[i].fe_logical) 445 - offset = (loff_t)ext[i].fe_logical; 446 - /* 447 - * If extent is not unwritten, then it contains valid 448 - * data, mapped or delayed. 449 - */ 450 - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) 451 - goto out; 452 - 453 - /* 454 - * If there is a unwritten extent at this offset, 455 - * it will be as a data or a hole according to page 456 - * cache that has data or not. 457 - */ 458 - if (ext4_find_unwritten_pgoff(inode, SEEK_DATA, 459 - next, &offset)) 460 - goto out; 461 - 462 - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) { 463 - ret = -ENXIO; 464 - goto out; 465 - } 466 - offset = next; 437 + /* 438 + * If there is a delay extent at this offset, 439 + * it will be as a data. 440 + */ 441 + ext4_es_find_delayed_extent_range(inode, last, last, &es); 442 + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 443 + if (last != start) 444 + dataoff = (loff_t)last << blkbits; 445 + break; 467 446 } 468 - } 469 - if (offset > inode->i_size) 470 - offset = inode->i_size; 471 - out: 447 + 448 + /* 449 + * If there is a unwritten extent at this offset, 450 + * it will be as a data or a hole according to page 451 + * cache that has data or not. 
452 + */ 453 + if (map.m_flags & EXT4_MAP_UNWRITTEN) { 454 + int unwritten; 455 + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA, 456 + &map, &dataoff); 457 + if (unwritten) 458 + break; 459 + } 460 + 461 + last++; 462 + dataoff = (loff_t)last << blkbits; 463 + } while (last <= end); 464 + 472 465 mutex_unlock(&inode->i_mutex); 473 - if (ret) 474 - return ret; 475 466 476 - return vfs_setpos(file, offset, maxsize); 467 + if (dataoff > isize) 468 + return -ENXIO; 469 + 470 + return vfs_setpos(file, dataoff, maxsize); 477 471 } 478 472 479 473 /* 480 - * ext4_seek_hole() retrieves the offset for SEEK_HOLE 474 + * ext4_seek_hole() retrieves the offset for SEEK_HOLE. 481 475 */ 482 476 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) 483 477 { 484 478 struct inode *inode = file->f_mapping->host; 485 - struct fiemap_extent_info fie; 486 - struct fiemap_extent ext[2]; 487 - loff_t next; 488 - int i, ret = 0; 479 + struct ext4_map_blocks map; 480 + struct extent_status es; 481 + ext4_lblk_t start, last, end; 482 + loff_t holeoff, isize; 483 + int blkbits; 484 + int ret = 0; 489 485 490 486 mutex_lock(&inode->i_mutex); 491 - if (offset >= inode->i_size) { 487 + 488 + isize = i_size_read(inode); 489 + if (offset >= isize) { 492 490 mutex_unlock(&inode->i_mutex); 493 491 return -ENXIO; 494 492 } 495 493 496 - fie.fi_flags = 0; 497 - fie.fi_extents_max = 2; 498 - fie.fi_extents_start = (struct fiemap_extent __user *)&ext; 499 - while (1) { 500 - mm_segment_t old_fs = get_fs(); 494 + blkbits = inode->i_sb->s_blocksize_bits; 495 + start = offset >> blkbits; 496 + last = start; 497 + end = isize >> blkbits; 498 + holeoff = offset; 501 499 502 - fie.fi_extents_mapped = 0; 503 - memset(ext, 0, sizeof(*ext)); 500 + do { 501 + map.m_lblk = last; 502 + map.m_len = end - last + 1; 503 + ret = ext4_map_blocks(NULL, inode, &map, 0); 504 + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { 505 + last += ret; 506 + holeoff = (loff_t)last << 
blkbits; 507 + continue; 508 + } 504 509 505 - set_fs(get_ds()); 506 - ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); 507 - set_fs(old_fs); 508 - if (ret) 509 - break; 510 + /* 511 + * If there is a delay extent at this offset, 512 + * we will skip this extent. 513 + */ 514 + ext4_es_find_delayed_extent_range(inode, last, last, &es); 515 + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 516 + last = es.es_lblk + es.es_len; 517 + holeoff = (loff_t)last << blkbits; 518 + continue; 519 + } 510 520 511 - /* No extents found */ 512 - if (!fie.fi_extents_mapped) 513 - break; 514 - 515 - for (i = 0; i < fie.fi_extents_mapped; i++) { 516 - next = (loff_t)(ext[i].fe_logical + ext[i].fe_length); 517 - /* 518 - * If extent is not unwritten, then it contains valid 519 - * data, mapped or delayed. 520 - */ 521 - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) { 522 - if (offset < (loff_t)ext[i].fe_logical) 523 - goto out; 524 - offset = next; 521 + /* 522 + * If there is a unwritten extent at this offset, 523 + * it will be as a data or a hole according to page 524 + * cache that has data or not. 525 + */ 526 + if (map.m_flags & EXT4_MAP_UNWRITTEN) { 527 + int unwritten; 528 + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE, 529 + &map, &holeoff); 530 + if (!unwritten) { 531 + last += ret; 532 + holeoff = (loff_t)last << blkbits; 525 533 continue; 526 534 } 527 - /* 528 - * If there is a unwritten extent at this offset, 529 - * it will be as a data or a hole according to page 530 - * cache that has data or not. 
531 - */ 532 - if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE, 533 - next, &offset)) 534 - goto out; 535 - 536 - offset = next; 537 - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) 538 - goto out; 539 535 } 540 - } 541 - if (offset > inode->i_size) 542 - offset = inode->i_size; 543 - out: 544 - mutex_unlock(&inode->i_mutex); 545 - if (ret) 546 - return ret; 547 536 548 - return vfs_setpos(file, offset, maxsize); 537 + /* find a hole */ 538 + break; 539 + } while (last <= end); 540 + 541 + mutex_unlock(&inode->i_mutex); 542 + 543 + if (holeoff > isize) 544 + holeoff = isize; 545 + 546 + return vfs_setpos(file, holeoff, maxsize); 549 547 } 550 548 551 549 /*
+12 -12
fs/ext4/resize.c
··· 24 24 return -EPERM; 25 25 26 26 /* 27 + * If we are not using the primary superblock/GDT copy don't resize, 28 + * because the user tools have no way of handling this. Probably a 29 + * bad time to do it anyways. 30 + */ 31 + if (EXT4_SB(sb)->s_sbh->b_blocknr != 32 + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 33 + ext4_warning(sb, "won't resize using backup superblock at %llu", 34 + (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 35 + return -EPERM; 36 + } 37 + 38 + /* 27 39 * We are not allowed to do online-resizing on a filesystem mounted 28 40 * with error, because it can destroy the filesystem easily. 29 41 */ ··· 769 757 printk(KERN_DEBUG 770 758 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", 771 759 gdb_num); 772 - 773 - /* 774 - * If we are not using the primary superblock/GDT copy don't resize, 775 - * because the user tools have no way of handling this. Probably a 776 - * bad time to do it anyways. 777 - */ 778 - if (EXT4_SB(sb)->s_sbh->b_blocknr != 779 - le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 780 - ext4_warning(sb, "won't resize using backup superblock at %llu", 781 - (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 782 - return -EPERM; 783 - } 784 760 785 761 gdb_bh = sb_bread(sb, gdblock); 786 762 if (!gdb_bh)
+1 -1
fs/ext4/super.c
··· 3482 3482 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3483 3483 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && 3484 3484 EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 3485 - ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " 3485 + ext4_warning(sb, "metadata_csum and uninit_bg are " 3486 3486 "redundant flags; please run fsck."); 3487 3487 3488 3488 /* Check for a known checksum algorithm */
+3 -2
fs/fcntl.c
··· 740 740 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY 741 741 * is defined as O_NONBLOCK on some platforms and not on others. 742 742 */ 743 - BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( 743 + BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( 744 744 O_RDONLY | O_WRONLY | O_RDWR | 745 745 O_CREAT | O_EXCL | O_NOCTTY | 746 746 O_TRUNC | O_APPEND | /* O_NONBLOCK | */ 747 747 __O_SYNC | O_DSYNC | FASYNC | 748 748 O_DIRECT | O_LARGEFILE | O_DIRECTORY | 749 749 O_NOFOLLOW | O_NOATIME | O_CLOEXEC | 750 - __FMODE_EXEC | O_PATH | __O_TMPFILE 750 + __FMODE_EXEC | O_PATH | __O_TMPFILE | 751 + __FMODE_NONOTIFY 751 752 )); 752 753 753 754 fasync_cache = kmem_cache_create("fasync_cache",
+49 -2
fs/fuse/dev.c
··· 131 131 req->in.h.pid = current->pid; 132 132 } 133 133 134 + void fuse_set_initialized(struct fuse_conn *fc) 135 + { 136 + /* Make sure stores before this are seen on another CPU */ 137 + smp_wmb(); 138 + fc->initialized = 1; 139 + } 140 + 134 141 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) 135 142 { 136 143 return !fc->initialized || (for_background && fc->blocked); ··· 162 155 if (intr) 163 156 goto out; 164 157 } 158 + /* Matches smp_wmb() in fuse_set_initialized() */ 159 + smp_rmb(); 165 160 166 161 err = -ENOTCONN; 167 162 if (!fc->connected) ··· 262 253 263 254 atomic_inc(&fc->num_waiting); 264 255 wait_event(fc->blocked_waitq, fc->initialized); 256 + /* Matches smp_wmb() in fuse_set_initialized() */ 257 + smp_rmb(); 265 258 req = fuse_request_alloc(0); 266 259 if (!req) 267 260 req = get_reserved_req(fc, file); ··· 522 511 } 523 512 EXPORT_SYMBOL_GPL(fuse_request_send); 524 513 514 + static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) 515 + { 516 + if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) 517 + args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE; 518 + 519 + if (fc->minor < 9) { 520 + switch (args->in.h.opcode) { 521 + case FUSE_LOOKUP: 522 + case FUSE_CREATE: 523 + case FUSE_MKNOD: 524 + case FUSE_MKDIR: 525 + case FUSE_SYMLINK: 526 + case FUSE_LINK: 527 + args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 528 + break; 529 + case FUSE_GETATTR: 530 + case FUSE_SETATTR: 531 + args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 532 + break; 533 + } 534 + } 535 + if (fc->minor < 12) { 536 + switch (args->in.h.opcode) { 537 + case FUSE_CREATE: 538 + args->in.args[0].size = sizeof(struct fuse_open_in); 539 + break; 540 + case FUSE_MKNOD: 541 + args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; 542 + break; 543 + } 544 + } 545 + } 546 + 525 547 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) 526 548 { 527 549 struct fuse_req *req; ··· 563 519 req = fuse_get_req(fc, 0); 
564 520 if (IS_ERR(req)) 565 521 return PTR_ERR(req); 522 + 523 + /* Needs to be done after fuse_get_req() so that fc->minor is valid */ 524 + fuse_adjust_compat(fc, args); 566 525 567 526 req->in.h.opcode = args->in.h.opcode; 568 527 req->in.h.nodeid = args->in.h.nodeid; ··· 2174 2127 if (fc->connected) { 2175 2128 fc->connected = 0; 2176 2129 fc->blocked = 0; 2177 - fc->initialized = 1; 2130 + fuse_set_initialized(fc); 2178 2131 end_io_requests(fc); 2179 2132 end_queued_requests(fc); 2180 2133 end_polls(fc); ··· 2193 2146 spin_lock(&fc->lock); 2194 2147 fc->connected = 0; 2195 2148 fc->blocked = 0; 2196 - fc->initialized = 1; 2149 + fuse_set_initialized(fc); 2197 2150 end_queued_requests(fc); 2198 2151 end_polls(fc); 2199 2152 wake_up_all(&fc->blocked_waitq);
+7 -24
fs/fuse/dir.c
··· 156 156 args->in.args[0].size = name->len + 1; 157 157 args->in.args[0].value = name->name; 158 158 args->out.numargs = 1; 159 - if (fc->minor < 9) 160 - args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 161 - else 162 - args->out.args[0].size = sizeof(struct fuse_entry_out); 159 + args->out.args[0].size = sizeof(struct fuse_entry_out); 163 160 args->out.args[0].value = outarg; 164 161 } 165 162 ··· 419 422 args.in.h.opcode = FUSE_CREATE; 420 423 args.in.h.nodeid = get_node_id(dir); 421 424 args.in.numargs = 2; 422 - args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) : 423 - sizeof(inarg); 425 + args.in.args[0].size = sizeof(inarg); 424 426 args.in.args[0].value = &inarg; 425 427 args.in.args[1].size = entry->d_name.len + 1; 426 428 args.in.args[1].value = entry->d_name.name; 427 429 args.out.numargs = 2; 428 - if (fc->minor < 9) 429 - args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 430 - else 431 - args.out.args[0].size = sizeof(outentry); 430 + args.out.args[0].size = sizeof(outentry); 432 431 args.out.args[0].value = &outentry; 433 432 args.out.args[1].size = sizeof(outopen); 434 433 args.out.args[1].value = &outopen; ··· 532 539 memset(&outarg, 0, sizeof(outarg)); 533 540 args->in.h.nodeid = get_node_id(dir); 534 541 args->out.numargs = 1; 535 - if (fc->minor < 9) 536 - args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; 537 - else 538 - args->out.args[0].size = sizeof(outarg); 542 + args->out.args[0].size = sizeof(outarg); 539 543 args->out.args[0].value = &outarg; 540 544 err = fuse_simple_request(fc, args); 541 545 if (err) ··· 582 592 inarg.umask = current_umask(); 583 593 args.in.h.opcode = FUSE_MKNOD; 584 594 args.in.numargs = 2; 585 - args.in.args[0].size = fc->minor < 12 ? 
FUSE_COMPAT_MKNOD_IN_SIZE : 586 - sizeof(inarg); 595 + args.in.args[0].size = sizeof(inarg); 587 596 args.in.args[0].value = &inarg; 588 597 args.in.args[1].size = entry->d_name.len + 1; 589 598 args.in.args[1].value = entry->d_name.name; ··· 888 899 args.in.args[0].size = sizeof(inarg); 889 900 args.in.args[0].value = &inarg; 890 901 args.out.numargs = 1; 891 - if (fc->minor < 9) 892 - args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 893 - else 894 - args.out.args[0].size = sizeof(outarg); 902 + args.out.args[0].size = sizeof(outarg); 895 903 args.out.args[0].value = &outarg; 896 904 err = fuse_simple_request(fc, &args); 897 905 if (!err) { ··· 1560 1574 args->in.args[0].size = sizeof(*inarg_p); 1561 1575 args->in.args[0].value = inarg_p; 1562 1576 args->out.numargs = 1; 1563 - if (fc->minor < 9) 1564 - args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; 1565 - else 1566 - args->out.args[0].size = sizeof(*outarg_p); 1577 + args->out.args[0].size = sizeof(*outarg_p); 1567 1578 args->out.args[0].value = outarg_p; 1568 1579 } 1569 1580
+2
fs/fuse/fuse_i.h
··· 906 906 int fuse_do_setattr(struct inode *inode, struct iattr *attr, 907 907 struct file *file); 908 908 909 + void fuse_set_initialized(struct fuse_conn *fc); 910 + 909 911 #endif /* _FS_FUSE_I_H */
+2 -3
fs/fuse/inode.c
··· 424 424 args.in.h.opcode = FUSE_STATFS; 425 425 args.in.h.nodeid = get_node_id(dentry->d_inode); 426 426 args.out.numargs = 1; 427 - args.out.args[0].size = 428 - fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); 427 + args.out.args[0].size = sizeof(outarg); 429 428 args.out.args[0].value = &outarg; 430 429 err = fuse_simple_request(fc, &args); 431 430 if (!err) ··· 897 898 fc->max_write = max_t(unsigned, 4096, fc->max_write); 898 899 fc->conn_init = 1; 899 900 } 900 - fc->initialized = 1; 901 + fuse_set_initialized(fc); 901 902 wake_up_all(&fc->blocked_waitq); 902 903 } 903 904
+3
fs/isofs/rock.c
··· 362 362 rs.cont_size = isonum_733(rr->u.CE.size); 363 363 break; 364 364 case SIG('E', 'R'): 365 + /* Invalid length of ER tag id? */ 366 + if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len) 367 + goto out; 365 368 ISOFS_SB(inode->i_sb)->s_rock = 1; 366 369 printk(KERN_DEBUG "ISO 9660 Extensions: "); 367 370 {
+8 -4
fs/kernfs/dir.c
··· 201 201 static int kernfs_name_compare(unsigned int hash, const char *name, 202 202 const void *ns, const struct kernfs_node *kn) 203 203 { 204 - if (hash != kn->hash) 205 - return hash - kn->hash; 206 - if (ns != kn->ns) 207 - return ns - kn->ns; 204 + if (hash < kn->hash) 205 + return -1; 206 + if (hash > kn->hash) 207 + return 1; 208 + if (ns < kn->ns) 209 + return -1; 210 + if (ns > kn->ns) 211 + return 1; 208 212 return strcmp(name, kn->name); 209 213 } 210 214
+4 -4
fs/lockd/svc.c
··· 138 138 139 139 dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); 140 140 141 - if (!nlm_timeout) 142 - nlm_timeout = LOCKD_DFLT_TIMEO; 143 - nlmsvc_timeout = nlm_timeout * HZ; 144 - 145 141 /* 146 142 * The main request loop. We don't terminate until the last 147 143 * NFS mount or NFS daemon has gone away. ··· 345 349 if (nlmsvc_users) 346 350 printk(KERN_WARNING 347 351 "lockd_up: no pid, %d users??\n", nlmsvc_users); 352 + 353 + if (!nlm_timeout) 354 + nlm_timeout = LOCKD_DFLT_TIMEO; 355 + nlmsvc_timeout = nlm_timeout * HZ; 348 356 349 357 serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup); 350 358 if (!serv) {
+1 -1
fs/locks.c
··· 1702 1702 break; 1703 1703 } 1704 1704 trace_generic_delete_lease(inode, fl); 1705 - if (fl) 1705 + if (fl && IS_LEASE(fl)) 1706 1706 error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); 1707 1707 spin_unlock(&inode->i_lock); 1708 1708 locks_dispose_list(&dispose);
+27 -15
fs/nfs/nfs4client.c
··· 228 228 kfree(clp->cl_serverowner); 229 229 kfree(clp->cl_serverscope); 230 230 kfree(clp->cl_implid); 231 + kfree(clp->cl_owner_id); 231 232 } 232 233 233 234 void nfs4_free_client(struct nfs_client *clp) ··· 453 452 spin_unlock(&nn->nfs_client_lock); 454 453 } 455 454 455 + static bool nfs4_match_client_owner_id(const struct nfs_client *clp1, 456 + const struct nfs_client *clp2) 457 + { 458 + if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL) 459 + return true; 460 + return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0; 461 + } 462 + 456 463 /** 457 464 * nfs40_walk_client_list - Find server that recognizes a client ID 458 465 * ··· 492 483 if (pos->rpc_ops != new->rpc_ops) 493 484 continue; 494 485 495 - if (pos->cl_proto != new->cl_proto) 496 - continue; 497 - 498 486 if (pos->cl_minorversion != new->cl_minorversion) 499 487 continue; 500 488 ··· 514 508 continue; 515 509 516 510 if (pos->cl_clientid != new->cl_clientid) 511 + continue; 512 + 513 + if (!nfs4_match_client_owner_id(pos, new)) 517 514 continue; 518 515 519 516 atomic_inc(&pos->cl_count); ··· 575 566 } 576 567 577 568 /* 578 - * Returns true if the server owners match 569 + * Returns true if the server major ids match 579 570 */ 580 571 static bool 581 - nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b) 572 + nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b) 582 573 { 583 574 struct nfs41_server_owner *o1 = a->cl_serverowner; 584 575 struct nfs41_server_owner *o2 = b->cl_serverowner; 585 - 586 - if (o1->minor_id != o2->minor_id) { 587 - dprintk("NFS: --> %s server owner minor IDs do not match\n", 588 - __func__); 589 - return false; 590 - } 591 576 592 577 if (o1->major_id_sz != o2->major_id_sz) 593 578 goto out_major_mismatch; ··· 624 621 if (pos->rpc_ops != new->rpc_ops) 625 622 continue; 626 623 627 - if (pos->cl_proto != new->cl_proto) 628 - continue; 629 - 630 624 if (pos->cl_minorversion != new->cl_minorversion) 631 625 continue; 632 
626 ··· 654 654 if (!nfs4_match_clientids(pos, new)) 655 655 continue; 656 656 657 - if (!nfs4_match_serverowners(pos, new)) 657 + /* 658 + * Note that session trunking is just a special subcase of 659 + * client id trunking. In either case, we want to fall back 660 + * to using the existing nfs_client. 661 + */ 662 + if (!nfs4_check_clientid_trunking(pos, new)) 663 + continue; 664 + 665 + /* Unlike NFSv4.0, we know that NFSv4.1 always uses the 666 + * uniform string, however someone might switch the 667 + * uniquifier string on us. 668 + */ 669 + if (!nfs4_match_client_owner_id(pos, new)) 658 670 continue; 659 671 660 672 atomic_inc(&pos->cl_count);
+15 -6
fs/nfs/nfs4proc.c
··· 1117 1117 return 0; 1118 1118 if ((delegation->type & fmode) != fmode) 1119 1119 return 0; 1120 - if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1121 - return 0; 1122 1120 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 1123 1121 return 0; 1124 1122 nfs_mark_delegation_referenced(delegation); ··· 4915 4917 } 4916 4918 4917 4919 static unsigned int 4918 - nfs4_init_nonuniform_client_string(const struct nfs_client *clp, 4920 + nfs4_init_nonuniform_client_string(struct nfs_client *clp, 4919 4921 char *buf, size_t len) 4920 4922 { 4921 4923 unsigned int result; 4924 + 4925 + if (clp->cl_owner_id != NULL) 4926 + return strlcpy(buf, clp->cl_owner_id, len); 4922 4927 4923 4928 rcu_read_lock(); 4924 4929 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", ··· 4931 4930 rpc_peeraddr2str(clp->cl_rpcclient, 4932 4931 RPC_DISPLAY_PROTO)); 4933 4932 rcu_read_unlock(); 4933 + clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); 4934 4934 return result; 4935 4935 } 4936 4936 4937 4937 static unsigned int 4938 - nfs4_init_uniform_client_string(const struct nfs_client *clp, 4938 + nfs4_init_uniform_client_string(struct nfs_client *clp, 4939 4939 char *buf, size_t len) 4940 4940 { 4941 4941 const char *nodename = clp->cl_rpcclient->cl_nodename; 4942 + unsigned int result; 4943 + 4944 + if (clp->cl_owner_id != NULL) 4945 + return strlcpy(buf, clp->cl_owner_id, len); 4942 4946 4943 4947 if (nfs4_client_id_uniquifier[0] != '\0') 4944 - return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", 4948 + result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", 4945 4949 clp->rpc_ops->version, 4946 4950 clp->cl_minorversion, 4947 4951 nfs4_client_id_uniquifier, 4948 4952 nodename); 4949 - return scnprintf(buf, len, "Linux NFSv%u.%u %s", 4953 + else 4954 + result = scnprintf(buf, len, "Linux NFSv%u.%u %s", 4950 4955 clp->rpc_ops->version, clp->cl_minorversion, 4951 4956 nodename); 4957 + clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); 4958 + return result; 4952 4959 } 4953 4960 
4954 4961 /*
+1 -1
fs/nfsd/nfs4state.c
··· 3897 3897 status = nfs4_setlease(dp); 3898 3898 goto out; 3899 3899 } 3900 - atomic_inc(&fp->fi_delegees); 3901 3900 if (fp->fi_had_conflict) { 3902 3901 status = -EAGAIN; 3903 3902 goto out_unlock; 3904 3903 } 3904 + atomic_inc(&fp->fi_delegees); 3905 3905 hash_delegation_locked(dp, fp); 3906 3906 status = 0; 3907 3907 out_unlock:
+5 -5
fs/notify/fanotify/fanotify_user.c
··· 259 259 struct fsnotify_event *kevent; 260 260 char __user *start; 261 261 int ret; 262 - DEFINE_WAIT(wait); 262 + DEFINE_WAIT_FUNC(wait, woken_wake_function); 263 263 264 264 start = buf; 265 265 group = file->private_data; 266 266 267 267 pr_debug("%s: group=%p\n", __func__, group); 268 268 269 + add_wait_queue(&group->notification_waitq, &wait); 269 270 while (1) { 270 - prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE); 271 - 272 271 mutex_lock(&group->notification_mutex); 273 272 kevent = get_one_event(group, count); 274 273 mutex_unlock(&group->notification_mutex); ··· 288 289 289 290 if (start != buf) 290 291 break; 291 - schedule(); 292 + 293 + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 292 294 continue; 293 295 } 294 296 ··· 318 318 buf += ret; 319 319 count -= ret; 320 320 } 321 + remove_wait_queue(&group->notification_waitq, &wait); 321 322 322 - finish_wait(&group->notification_waitq, &wait); 323 323 if (start != buf && ret != -EFAULT) 324 324 ret = buf - start; 325 325 return ret;
+1 -4
fs/ocfs2/dlm/dlmrecovery.c
··· 2023 2023 dlm_lockres_drop_inflight_ref(dlm, res); 2024 2024 spin_unlock(&res->spinlock); 2025 2025 2026 - if (ret < 0) { 2026 + if (ret < 0) 2027 2027 mlog_errno(ret); 2028 - if (newlock) 2029 - dlm_lock_put(newlock); 2030 - } 2031 2028 2032 2029 return ret; 2033 2030 }
+35 -8
fs/ocfs2/namei.c
··· 94 94 struct inode *inode, 95 95 const char *symname); 96 96 97 + static int ocfs2_double_lock(struct ocfs2_super *osb, 98 + struct buffer_head **bh1, 99 + struct inode *inode1, 100 + struct buffer_head **bh2, 101 + struct inode *inode2, 102 + int rename); 103 + 104 + static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); 97 105 /* An orphan dir name is an 8 byte value, printed as a hex string */ 98 106 #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) 99 107 ··· 686 678 { 687 679 handle_t *handle; 688 680 struct inode *inode = old_dentry->d_inode; 681 + struct inode *old_dir = old_dentry->d_parent->d_inode; 689 682 int err; 690 683 struct buffer_head *fe_bh = NULL; 684 + struct buffer_head *old_dir_bh = NULL; 691 685 struct buffer_head *parent_fe_bh = NULL; 692 686 struct ocfs2_dinode *fe = NULL; 693 687 struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); ··· 706 696 707 697 dquot_initialize(dir); 708 698 709 - err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); 699 + err = ocfs2_double_lock(osb, &old_dir_bh, old_dir, 700 + &parent_fe_bh, dir, 0); 710 701 if (err < 0) { 711 702 if (err != -ENOENT) 712 703 mlog_errno(err); 713 704 return err; 705 + } 706 + 707 + /* make sure both dirs have bhs 708 + * get an extra ref on old_dir_bh if old==new */ 709 + if (!parent_fe_bh) { 710 + if (old_dir_bh) { 711 + parent_fe_bh = old_dir_bh; 712 + get_bh(parent_fe_bh); 713 + } else { 714 + mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str); 715 + err = -EIO; 716 + goto out; 717 + } 714 718 } 715 719 716 720 if (!dir->i_nlink) { ··· 732 708 goto out; 733 709 } 734 710 735 - err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name, 711 + err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, 736 712 old_dentry->d_name.len, &old_de_ino); 737 713 if (err) { 738 714 err = -ENOENT; ··· 825 801 ocfs2_inode_unlock(inode, 1); 826 802 827 803 out: 828 - ocfs2_inode_unlock(dir, 1); 804 + ocfs2_double_unlock(old_dir, dir); 829 
805 830 806 brelse(fe_bh); 831 807 brelse(parent_fe_bh); 808 + brelse(old_dir_bh); 832 809 833 810 ocfs2_free_dir_lookup_result(&lookup); 834 811 ··· 1097 1072 } 1098 1073 1099 1074 /* 1100 - * The only place this should be used is rename! 1075 + * The only place this should be used is rename and link! 1101 1076 * if they have the same id, then the 1st one is the only one locked. 1102 1077 */ 1103 1078 static int ocfs2_double_lock(struct ocfs2_super *osb, 1104 1079 struct buffer_head **bh1, 1105 1080 struct inode *inode1, 1106 1081 struct buffer_head **bh2, 1107 - struct inode *inode2) 1082 + struct inode *inode2, 1083 + int rename) 1108 1084 { 1109 1085 int status; 1110 1086 int inode1_is_ancestor, inode2_is_ancestor; ··· 1153 1127 } 1154 1128 /* lock id2 */ 1155 1129 status = ocfs2_inode_lock_nested(inode2, bh2, 1, 1156 - OI_LS_RENAME1); 1130 + rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT); 1157 1131 if (status < 0) { 1158 1132 if (status != -ENOENT) 1159 1133 mlog_errno(status); ··· 1162 1136 } 1163 1137 1164 1138 /* lock id1 */ 1165 - status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); 1139 + status = ocfs2_inode_lock_nested(inode1, bh1, 1, 1140 + rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT); 1166 1141 if (status < 0) { 1167 1142 /* 1168 1143 * An error return must mean that no cluster locks ··· 1279 1252 1280 1253 /* if old and new are the same, this'll just do one lock. */ 1281 1254 status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, 1282 - &new_dir_bh, new_dir); 1255 + &new_dir_bh, new_dir, 1); 1283 1256 if (status < 0) { 1284 1257 mlog_errno(status); 1285 1258 goto bail;
+16 -15
fs/udf/dir.c
··· 57 57 sector_t offset; 58 58 int i, num, ret = 0; 59 59 struct extent_position epos = { NULL, 0, {0, 0} }; 60 + struct super_block *sb = dir->i_sb; 60 61 61 62 if (ctx->pos == 0) { 62 63 if (!dir_emit_dot(file, ctx)) ··· 77 76 if (nf_pos == 0) 78 77 nf_pos = udf_ext0_offset(dir); 79 78 80 - fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1); 79 + fibh.soffset = fibh.eoffset = nf_pos & (sb->s_blocksize - 1); 81 80 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 82 - if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits, 81 + if (inode_bmap(dir, nf_pos >> sb->s_blocksize_bits, 83 82 &epos, &eloc, &elen, &offset) 84 83 != (EXT_RECORDED_ALLOCATED >> 30)) { 85 84 ret = -ENOENT; 86 85 goto out; 87 86 } 88 - block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); 89 - if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 87 + block = udf_get_lb_pblock(sb, &eloc, offset); 88 + if ((++offset << sb->s_blocksize_bits) < elen) { 90 89 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 91 90 epos.offset -= sizeof(struct short_ad); 92 91 else if (iinfo->i_alloc_type == ··· 96 95 offset = 0; 97 96 } 98 97 99 - if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) { 98 + if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) { 100 99 ret = -EIO; 101 100 goto out; 102 101 } 103 102 104 - if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) { 105 - i = 16 >> (dir->i_sb->s_blocksize_bits - 9); 106 - if (i + offset > (elen >> dir->i_sb->s_blocksize_bits)) 107 - i = (elen >> dir->i_sb->s_blocksize_bits) - offset; 103 + if (!(offset & ((16 >> (sb->s_blocksize_bits - 9)) - 1))) { 104 + i = 16 >> (sb->s_blocksize_bits - 9); 105 + if (i + offset > (elen >> sb->s_blocksize_bits)) 106 + i = (elen >> sb->s_blocksize_bits) - offset; 108 107 for (num = 0; i > 0; i--) { 109 - block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i); 110 - tmp = udf_tgetblk(dir->i_sb, block); 108 + block = udf_get_lb_pblock(sb, &eloc, offset + i); 109 + tmp = udf_tgetblk(sb, 
block); 111 110 if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp)) 112 111 bha[num++] = tmp; 113 112 else ··· 153 152 } 154 153 155 154 if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { 156 - if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) 155 + if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE)) 157 156 continue; 158 157 } 159 158 160 159 if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { 161 - if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) 160 + if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE)) 162 161 continue; 163 162 } 164 163 ··· 168 167 continue; 169 168 } 170 169 171 - flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); 170 + flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN); 172 171 if (!flen) 173 172 continue; 174 173 175 174 tloc = lelb_to_cpu(cfi.icb.extLocation); 176 - iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0); 175 + iblock = udf_get_lb_pblock(sb, &tloc, 0); 177 176 if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN)) 178 177 goto out; 179 178 } /* end while */
+14
fs/udf/inode.c
··· 1489 1489 } 1490 1490 inode->i_generation = iinfo->i_unique; 1491 1491 1492 + /* Sanity checks for files in ICB so that we don't get confused later */ 1493 + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { 1494 + /* 1495 + * For file in ICB data is stored in allocation descriptor 1496 + * so sizes should match 1497 + */ 1498 + if (iinfo->i_lenAlloc != inode->i_size) 1499 + goto out; 1500 + /* File in ICB has to fit in there... */ 1501 + if (inode->i_size > inode->i_sb->s_blocksize - 1502 + udf_file_entry_alloc_offset(inode)) 1503 + goto out; 1504 + } 1505 + 1492 1506 switch (fe->icbTag.fileType) { 1493 1507 case ICBTAG_FILE_TYPE_DIRECTORY: 1494 1508 inode->i_op = &udf_dir_inode_operations;
+9 -8
fs/udf/namei.c
··· 159 159 struct udf_inode_info *dinfo = UDF_I(dir); 160 160 int isdotdot = child->len == 2 && 161 161 child->name[0] == '.' && child->name[1] == '.'; 162 + struct super_block *sb = dir->i_sb; 162 163 163 164 size = udf_ext0_offset(dir) + dir->i_size; 164 165 f_pos = udf_ext0_offset(dir); 165 166 166 167 fibh->sbh = fibh->ebh = NULL; 167 - fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); 168 + fibh->soffset = fibh->eoffset = f_pos & (sb->s_blocksize - 1); 168 169 if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { 169 - if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, 170 + if (inode_bmap(dir, f_pos >> sb->s_blocksize_bits, &epos, 170 171 &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) 171 172 goto out_err; 172 - block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); 173 - if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { 173 + block = udf_get_lb_pblock(sb, &eloc, offset); 174 + if ((++offset << sb->s_blocksize_bits) < elen) { 174 175 if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) 175 176 epos.offset -= sizeof(struct short_ad); 176 177 else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) ··· 179 178 } else 180 179 offset = 0; 181 180 182 - fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); 181 + fibh->sbh = fibh->ebh = udf_tread(sb, block); 183 182 if (!fibh->sbh) 184 183 goto out_err; 185 184 } ··· 218 217 } 219 218 220 219 if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { 221 - if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) 220 + if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE)) 222 221 continue; 223 222 } 224 223 225 224 if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { 226 - if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) 225 + if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE)) 227 226 continue; 228 227 } 229 228 ··· 234 233 if (!lfi) 235 234 continue; 236 235 237 - flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); 236 + flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN); 
238 237 if (flen && udf_match(flen, fname, child->len, child->name)) 239 238 goto out_ok; 240 239 }
+46 -11
fs/udf/symlink.c
··· 30 30 #include <linux/buffer_head.h> 31 31 #include "udf_i.h" 32 32 33 - static void udf_pc_to_char(struct super_block *sb, unsigned char *from, 34 - int fromlen, unsigned char *to) 33 + static int udf_pc_to_char(struct super_block *sb, unsigned char *from, 34 + int fromlen, unsigned char *to, int tolen) 35 35 { 36 36 struct pathComponent *pc; 37 37 int elen = 0; 38 + int comp_len; 38 39 unsigned char *p = to; 39 40 41 + /* Reserve one byte for terminating \0 */ 42 + tolen--; 40 43 while (elen < fromlen) { 41 44 pc = (struct pathComponent *)(from + elen); 45 + elen += sizeof(struct pathComponent); 42 46 switch (pc->componentType) { 43 47 case 1: 44 48 /* 45 49 * Symlink points to some place which should be agreed 46 50 * upon between originator and receiver of the media. Ignore. 47 51 */ 48 - if (pc->lengthComponentIdent > 0) 52 + if (pc->lengthComponentIdent > 0) { 53 + elen += pc->lengthComponentIdent; 49 54 break; 55 + } 50 56 /* Fall through */ 51 57 case 2: 58 + if (tolen == 0) 59 + return -ENAMETOOLONG; 52 60 p = to; 53 61 *p++ = '/'; 62 + tolen--; 54 63 break; 55 64 case 3: 65 + if (tolen < 3) 66 + return -ENAMETOOLONG; 56 67 memcpy(p, "../", 3); 57 68 p += 3; 69 + tolen -= 3; 58 70 break; 59 71 case 4: 72 + if (tolen < 2) 73 + return -ENAMETOOLONG; 60 74 memcpy(p, "./", 2); 61 75 p += 2; 76 + tolen -= 2; 62 77 /* that would be . 
- just ignore */ 63 78 break; 64 79 case 5: 65 - p += udf_get_filename(sb, pc->componentIdent, p, 66 - pc->lengthComponentIdent); 80 + elen += pc->lengthComponentIdent; 81 + if (elen > fromlen) 82 + return -EIO; 83 + comp_len = udf_get_filename(sb, pc->componentIdent, 84 + pc->lengthComponentIdent, 85 + p, tolen); 86 + p += comp_len; 87 + tolen -= comp_len; 88 + if (tolen == 0) 89 + return -ENAMETOOLONG; 67 90 *p++ = '/'; 91 + tolen--; 68 92 break; 69 93 } 70 - elen += sizeof(struct pathComponent) + pc->lengthComponentIdent; 71 94 } 72 95 if (p > to + 1) 73 96 p[-1] = '\0'; 74 97 else 75 98 p[0] = '\0'; 99 + return 0; 76 100 } 77 101 78 102 static int udf_symlink_filler(struct file *file, struct page *page) ··· 104 80 struct inode *inode = page->mapping->host; 105 81 struct buffer_head *bh = NULL; 106 82 unsigned char *symlink; 107 - int err = -EIO; 83 + int err; 108 84 unsigned char *p = kmap(page); 109 85 struct udf_inode_info *iinfo; 110 86 uint32_t pos; 87 + 88 + /* We don't support symlinks longer than one block */ 89 + if (inode->i_size > inode->i_sb->s_blocksize) { 90 + err = -ENAMETOOLONG; 91 + goto out_unmap; 92 + } 111 93 112 94 iinfo = UDF_I(inode); 113 95 pos = udf_block_map(inode, 0); ··· 124 94 } else { 125 95 bh = sb_bread(inode->i_sb, pos); 126 96 127 - if (!bh) 128 - goto out; 97 + if (!bh) { 98 + err = -EIO; 99 + goto out_unlock_inode; 100 + } 129 101 130 102 symlink = bh->b_data; 131 103 } 132 104 133 - udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p); 105 + err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE); 134 106 brelse(bh); 107 + if (err) 108 + goto out_unlock_inode; 135 109 136 110 up_read(&iinfo->i_data_sem); 137 111 SetPageUptodate(page); ··· 143 109 unlock_page(page); 144 110 return 0; 145 111 146 - out: 112 + out_unlock_inode: 147 113 up_read(&iinfo->i_data_sem); 148 114 SetPageError(page); 115 + out_unmap: 149 116 kunmap(page); 150 117 unlock_page(page); 151 118 return err;
+2 -1
fs/udf/udfdecl.h
··· 211 211 } 212 212 213 213 /* unicode.c */ 214 - extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int); 214 + extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *, 215 + int); 215 216 extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *, 216 217 int); 217 218 extern int udf_build_ustr(struct ustr *, dstring *, int);
+16 -12
fs/udf/unicode.c
··· 28 28 29 29 #include "udf_sb.h" 30 30 31 - static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int); 31 + static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *, 32 + int); 32 33 33 34 static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen) 34 35 { ··· 334 333 return u_len + 1; 335 334 } 336 335 337 - int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname, 338 - int flen) 336 + int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen, 337 + uint8_t *dname, int dlen) 339 338 { 340 339 struct ustr *filename, *unifilename; 341 340 int len = 0; ··· 348 347 if (!unifilename) 349 348 goto out1; 350 349 351 - if (udf_build_ustr_exact(unifilename, sname, flen)) 350 + if (udf_build_ustr_exact(unifilename, sname, slen)) 352 351 goto out2; 353 352 354 353 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { ··· 367 366 } else 368 367 goto out2; 369 368 370 - len = udf_translate_to_linux(dname, filename->u_name, filename->u_len, 369 + len = udf_translate_to_linux(dname, dlen, 370 + filename->u_name, filename->u_len, 371 371 unifilename->u_name, unifilename->u_len); 372 372 out2: 373 373 kfree(unifilename); ··· 405 403 #define EXT_MARK '.' 
406 404 #define CRC_MARK '#' 407 405 #define EXT_SIZE 5 406 + /* Number of chars we need to store generated CRC to make filename unique */ 407 + #define CRC_LEN 5 408 408 409 - static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, 410 - int udfLen, uint8_t *fidName, 411 - int fidNameLen) 409 + static int udf_translate_to_linux(uint8_t *newName, int newLen, 410 + uint8_t *udfName, int udfLen, 411 + uint8_t *fidName, int fidNameLen) 412 412 { 413 413 int index, newIndex = 0, needsCRC = 0; 414 414 int extIndex = 0, newExtIndex = 0, hasExt = 0; ··· 443 439 newExtIndex = newIndex; 444 440 } 445 441 } 446 - if (newIndex < 256) 442 + if (newIndex < newLen) 447 443 newName[newIndex++] = curr; 448 444 else 449 445 needsCRC = 1; ··· 471 467 } 472 468 ext[localExtIndex++] = curr; 473 469 } 474 - maxFilenameLen = 250 - localExtIndex; 470 + maxFilenameLen = newLen - CRC_LEN - localExtIndex; 475 471 if (newIndex > maxFilenameLen) 476 472 newIndex = maxFilenameLen; 477 473 else 478 474 newIndex = newExtIndex; 479 - } else if (newIndex > 250) 480 - newIndex = 250; 475 + } else if (newIndex > newLen - CRC_LEN) 476 + newIndex = newLen - CRC_LEN; 481 477 newName[newIndex++] = CRC_MARK; 482 478 valueCRC = crc_itu_t(0, fidName, fidNameLen); 483 479 newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
+4 -4
include/acpi/processor.h
··· 196 196 struct acpi_processor { 197 197 acpi_handle handle; 198 198 u32 acpi_id; 199 - u32 apic_id; 200 - u32 id; 199 + u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */ 200 + u32 id; /* CPU logical ID allocated by OS */ 201 201 u32 pblk; 202 202 int performance_platform_limit; 203 203 int throttling_platform_limit; ··· 310 310 #endif /* CONFIG_CPU_FREQ */ 311 311 312 312 /* in processor_core.c */ 313 - int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); 314 - int acpi_map_cpuid(int apic_id, u32 acpi_id); 313 + int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); 314 + int acpi_map_cpuid(int phys_id, u32 acpi_id); 315 315 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); 316 316 317 317 /* in processor_pdc.c */
+6 -2
include/asm-generic/tlb.h
··· 136 136 137 137 static inline void __tlb_reset_range(struct mmu_gather *tlb) 138 138 { 139 - tlb->start = TASK_SIZE; 140 - tlb->end = 0; 139 + if (tlb->fullmm) { 140 + tlb->start = tlb->end = ~0; 141 + } else { 142 + tlb->start = TASK_SIZE; 143 + tlb->end = 0; 144 + } 141 145 } 142 146 143 147 /*
+1 -1
include/dt-bindings/thermal/thermal.h
··· 11 11 #define _DT_BINDINGS_THERMAL_THERMAL_H 12 12 13 13 /* On cooling devices upper and lower limits */ 14 - #define THERMAL_NO_LIMIT (-1UL) 14 + #define THERMAL_NO_LIMIT (~0) 15 15 16 16 #endif 17 17
+2 -2
include/linux/acpi.h
··· 147 147 148 148 #ifdef CONFIG_ACPI_HOTPLUG_CPU 149 149 /* Arch dependent functions for cpu hotplug support */ 150 - int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); 151 - int acpi_unmap_lsapic(int cpu); 150 + int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu); 151 + int acpi_unmap_cpu(int cpu); 152 152 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 153 153 154 154 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
+6 -2
include/linux/blk-mq.h
··· 34 34 unsigned long flags; /* BLK_MQ_F_* flags */ 35 35 36 36 struct request_queue *queue; 37 - unsigned int queue_num; 38 37 struct blk_flush_queue *fq; 39 38 40 39 void *driver_data; ··· 53 54 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; 54 55 55 56 unsigned int numa_node; 56 - unsigned int cmd_size; /* per-request extra data */ 57 + unsigned int queue_num; 57 58 58 59 atomic_t nr_active; 59 60 ··· 194 195 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); 195 196 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); 196 197 198 + int blk_mq_request_started(struct request *rq); 197 199 void blk_mq_start_request(struct request *rq); 198 200 void blk_mq_end_request(struct request *rq, int error); 199 201 void __blk_mq_end_request(struct request *rq, int error); 200 202 201 203 void blk_mq_requeue_request(struct request *rq); 202 204 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); 205 + void blk_mq_cancel_requeue_work(struct request_queue *q); 203 206 void blk_mq_kick_requeue_list(struct request_queue *q); 207 + void blk_mq_abort_requeue_list(struct request_queue *q); 204 208 void blk_mq_complete_request(struct request *rq); 205 209 206 210 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); ··· 214 212 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 215 213 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 216 214 void *priv); 215 + void blk_mq_unfreeze_queue(struct request_queue *q); 216 + void blk_mq_freeze_queue_start(struct request_queue *q); 217 217 218 218 /* 219 219 * Driver command data is immediately after the request. So subtract request
+2
include/linux/blk_types.h
··· 190 190 __REQ_PM, /* runtime pm request */ 191 191 __REQ_HASHED, /* on IO scheduler merge hash */ 192 192 __REQ_MQ_INFLIGHT, /* track inflight for MQ */ 193 + __REQ_NO_TIMEOUT, /* requests may never expire */ 193 194 __REQ_NR_BITS, /* stops here */ 194 195 }; 195 196 ··· 244 243 #define REQ_PM (1ULL << __REQ_PM) 245 244 #define REQ_HASHED (1ULL << __REQ_HASHED) 246 245 #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 246 + #define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) 247 247 248 248 #endif /* __LINUX_BLK_TYPES_H */
+2 -2
include/linux/ceph/osd_client.h
··· 87 87 struct ceph_osd_data osd_data; 88 88 } extent; 89 89 struct { 90 - __le32 name_len; 91 - __le32 value_len; 90 + u32 name_len; 91 + u32 value_len; 92 92 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ 93 93 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ 94 94 struct ceph_osd_data osd_data;
+6 -6
include/linux/compiler.h
··· 215 215 } 216 216 } 217 217 218 - static __always_inline void __assign_once_size(volatile void *p, void *res, int size) 218 + static __always_inline void __write_once_size(volatile void *p, void *res, int size) 219 219 { 220 220 switch (size) { 221 221 case 1: *(volatile __u8 *)p = *(__u8 *)res; break; ··· 235 235 /* 236 236 * Prevent the compiler from merging or refetching reads or writes. The 237 237 * compiler is also forbidden from reordering successive instances of 238 - * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the 238 + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 239 239 * compiler is aware of some particular ordering. One way to make the 240 240 * compiler aware of ordering is to put the two invocations of READ_ONCE, 241 - * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. 241 + * WRITE_ONCE or ACCESS_ONCE() in different C statements. 242 242 * 243 243 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 244 244 * data types like structs or unions. If the size of the accessed data 245 245 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 246 - * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a 246 + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a 247 247 * compile-time warning. 248 248 * 249 249 * Their two major use cases are: (1) Mediating communication between ··· 257 257 #define READ_ONCE(x) \ 258 258 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) 259 259 260 - #define ASSIGN_ONCE(val, x) \ 261 - ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) 260 + #define WRITE_ONCE(x, val) \ 261 + ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; }) 262 262 263 263 #endif /* __KERNEL__ */ 264 264
+3 -3
include/linux/cpu_cooling.h
··· 50 50 of_cpufreq_cooling_register(struct device_node *np, 51 51 const struct cpumask *clip_cpus) 52 52 { 53 - return NULL; 53 + return ERR_PTR(-ENOSYS); 54 54 } 55 55 #endif 56 56 ··· 65 65 static inline struct thermal_cooling_device * 66 66 cpufreq_cooling_register(const struct cpumask *clip_cpus) 67 67 { 68 - return NULL; 68 + return ERR_PTR(-ENOSYS); 69 69 } 70 70 static inline struct thermal_cooling_device * 71 71 of_cpufreq_cooling_register(struct device_node *np, 72 72 const struct cpumask *clip_cpus) 73 73 { 74 - return NULL; 74 + return ERR_PTR(-ENOSYS); 75 75 } 76 76 static inline 77 77 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
-3
include/linux/cpuidle.h
··· 53 53 }; 54 54 55 55 /* Idle State Flags */ 56 - #define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */ 57 56 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ 58 57 #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ 59 58 ··· 88 89 /** 89 90 * cpuidle_get_last_residency - retrieves the last state's residency time 90 91 * @dev: the target CPU 91 - * 92 - * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set 93 92 */ 94 93 static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) 95 94 {
+1 -1
include/linux/fs.h
··· 135 135 #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) 136 136 137 137 /* File was opened by fanotify and shouldn't generate fanotify events */ 138 - #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 138 + #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) 139 139 140 140 /* 141 141 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
+53 -9
include/linux/kdb.h
··· 13 13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> 14 14 */ 15 15 16 + /* Shifted versions of the command enable bits are be used if the command 17 + * has no arguments (see kdb_check_flags). This allows commands, such as 18 + * go, to have different permissions depending upon whether it is called 19 + * with an argument. 20 + */ 21 + #define KDB_ENABLE_NO_ARGS_SHIFT 10 22 + 16 23 typedef enum { 17 - KDB_REPEAT_NONE = 0, /* Do not repeat this command */ 18 - KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ 19 - KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ 20 - } kdb_repeat_t; 24 + KDB_ENABLE_ALL = (1 << 0), /* Enable everything */ 25 + KDB_ENABLE_MEM_READ = (1 << 1), 26 + KDB_ENABLE_MEM_WRITE = (1 << 2), 27 + KDB_ENABLE_REG_READ = (1 << 3), 28 + KDB_ENABLE_REG_WRITE = (1 << 4), 29 + KDB_ENABLE_INSPECT = (1 << 5), 30 + KDB_ENABLE_FLOW_CTRL = (1 << 6), 31 + KDB_ENABLE_SIGNAL = (1 << 7), 32 + KDB_ENABLE_REBOOT = (1 << 8), 33 + /* User exposed values stop here, all remaining flags are 34 + * exclusively used to describe a commands behaviour. 
35 + */
36 + 
37 + KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
38 + KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
39 + 
40 + KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
41 + KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
42 + << KDB_ENABLE_NO_ARGS_SHIFT,
43 + KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
44 + << KDB_ENABLE_NO_ARGS_SHIFT,
45 + KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
46 + << KDB_ENABLE_NO_ARGS_SHIFT,
47 + KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
48 + << KDB_ENABLE_NO_ARGS_SHIFT,
49 + KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
50 + << KDB_ENABLE_NO_ARGS_SHIFT,
51 + KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
52 + << KDB_ENABLE_NO_ARGS_SHIFT,
53 + KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
54 + << KDB_ENABLE_NO_ARGS_SHIFT,
55 + KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
56 + << KDB_ENABLE_NO_ARGS_SHIFT,
57 + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
58 + << KDB_ENABLE_NO_ARGS_SHIFT,
59 + KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
60 + 
61 + KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
62 + KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
63 + } kdb_cmdflags_t;
21 64 
22 65 typedef int (*kdb_func_t)(int, const char **);
23 66 
··· 105 62 #define KDB_BADLENGTH (-19)
106 63 #define KDB_NOBP (-20)
107 64 #define KDB_BADADDR (-21)
65 + #define KDB_NOPERM (-22)
108 66 
109 67 /*
110 68 * kdb_diemsg
··· 190 146 
191 147 /* Dynamic kdb shell command registration */
192 148 extern int kdb_register(char *, kdb_func_t, char *, char *, short);
193 - extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
194 - short, kdb_repeat_t);
149 + extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
150 + short, kdb_cmdflags_t);
195 151 extern int kdb_unregister(char *);
196 152 #else /* ! CONFIG_KGDB_KDB */
197 153 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
198 154 static inline void kdb_init(int level) {}
199 155 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
200 156 char *help, short minlen) { return 0; }
201 - static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
202 - char *help, short minlen,
203 - kdb_repeat_t repeat) { return 0; }
157 + static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
158 + char *help, short minlen,
159 + kdb_cmdflags_t flags) { return 0; }
204 160 static inline int kdb_unregister(char *cmd) { return 0; }
205 161 #endif /* CONFIG_KGDB_KDB */
206 162 enum {
+2 -20
include/linux/mfd/stmpe.h
··· 50 50 STMPE_IDX_GPEDR_MSB, 51 51 STMPE_IDX_GPRER_LSB, 52 52 STMPE_IDX_GPFER_LSB, 53 + STMPE_IDX_GPPUR_LSB, 54 + STMPE_IDX_GPPDR_LSB, 53 55 STMPE_IDX_GPAFR_U_MSB, 54 56 STMPE_IDX_IEGPIOR_LSB, 55 57 STMPE_IDX_ISGPIOR_LSB, ··· 114 112 enum stmpe_block block); 115 113 extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); 116 114 extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); 117 - 118 - struct matrix_keymap_data; 119 - 120 - /** 121 - * struct stmpe_keypad_platform_data - STMPE keypad platform data 122 - * @keymap_data: key map table and size 123 - * @debounce_ms: debounce interval, in ms. Maximum is 124 - * %STMPE_KEYPAD_MAX_DEBOUNCE. 125 - * @scan_count: number of key scanning cycles to confirm key data. 126 - * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT. 127 - * @no_autorepeat: disable key autorepeat 128 - */ 129 - struct stmpe_keypad_platform_data { 130 - const struct matrix_keymap_data *keymap_data; 131 - unsigned int debounce_ms; 132 - unsigned int scan_count; 133 - bool no_autorepeat; 134 - }; 135 115 136 116 #define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) 137 117 ··· 183 199 * @irq_gpio: gpio number over which irq will be requested (significant only if 184 200 * irq_over_gpio is true) 185 201 * @gpio: GPIO-specific platform data 186 - * @keypad: keypad-specific platform data 187 202 * @ts: touchscreen-specific platform data 188 203 */ 189 204 struct stmpe_platform_data { ··· 195 212 int autosleep_timeout; 196 213 197 214 struct stmpe_gpio_platform_data *gpio; 198 - struct stmpe_keypad_platform_data *keypad; 199 215 struct stmpe_ts_platform_data *ts; 200 216 }; 201 217
+1 -1
include/linux/mm.h
··· 1952 1952 #if VM_GROWSUP 1953 1953 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1954 1954 #else 1955 - #define expand_upwards(vma, address) do { } while (0) 1955 + #define expand_upwards(vma, address) (0) 1956 1956 #endif 1957 1957 1958 1958 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+1
include/linux/mmc/sdhci.h
··· 137 137 #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ 138 138 #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ 139 139 #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ 140 + #define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ 140 141 141 142 unsigned int version; /* SDHCI spec. version */ 142 143
+14 -12
include/linux/netdevice.h
··· 852 852 * 3. Update dev->stats asynchronously and atomically, and define 853 853 * neither operation. 854 854 * 855 - * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); 855 + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 856 856 * If device support VLAN filtering this function is called when a 857 857 * VLAN id is registered. 858 858 * 859 - * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); 859 + * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); 860 860 * If device support VLAN filtering this function is called when a 861 861 * VLAN id is unregistered. 862 862 * ··· 1012 1012 * Callback to use for xmit over the accelerated station. This 1013 1013 * is used in place of ndo_start_xmit on accelerated net 1014 1014 * devices. 1015 - * bool (*ndo_gso_check) (struct sk_buff *skb, 1016 - * struct net_device *dev); 1015 + * netdev_features_t (*ndo_features_check) (struct sk_buff *skb, 1016 + * struct net_device *dev 1017 + * netdev_features_t features); 1017 1018 * Called by core transmit path to determine if device is capable of 1018 - * performing GSO on a packet. The device returns true if it is 1019 - * able to GSO the packet, false otherwise. If the return value is 1020 - * false the stack will do software GSO. 1019 + * performing offload operations on a given packet. This is to give 1020 + * the device an opportunity to implement any restrictions that cannot 1021 + * be otherwise expressed by feature flags. The check is called with 1022 + * the set of features that the stack has calculated and it returns 1023 + * those the driver believes to be appropriate. 
1021 1024 * 1022 1025 * int (*ndo_switch_parent_id_get)(struct net_device *dev, 1023 1026 * struct netdev_phys_item_id *psid); ··· 1181 1178 struct net_device *dev, 1182 1179 void *priv); 1183 1180 int (*ndo_get_lock_subclass)(struct net_device *dev); 1184 - bool (*ndo_gso_check) (struct sk_buff *skb, 1185 - struct net_device *dev); 1181 + netdev_features_t (*ndo_features_check) (struct sk_buff *skb, 1182 + struct net_device *dev, 1183 + netdev_features_t features); 1186 1184 #ifdef CONFIG_NET_SWITCHDEV 1187 1185 int (*ndo_switch_parent_id_get)(struct net_device *dev, 1188 1186 struct netdev_phys_item_id *psid); ··· 2085 2081 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 2086 2082 #define for_each_netdev_in_bond_rcu(bond, slave) \ 2087 2083 for_each_netdev_rcu(&init_net, slave) \ 2088 - if (netdev_master_upper_dev_get_rcu(slave) == bond) 2084 + if (netdev_master_upper_dev_get_rcu(slave) == (bond)) 2089 2085 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 2090 2086 2091 2087 static inline struct net_device *next_net_device(struct net_device *dev) ··· 3615 3611 netdev_features_t features) 3616 3612 { 3617 3613 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 3618 - (dev->netdev_ops->ndo_gso_check && 3619 - !dev->netdev_ops->ndo_gso_check(skb, dev)) || 3620 3614 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 3621 3615 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 3622 3616 }
+2 -2
include/linux/netlink.h
··· 46 46 unsigned int flags; 47 47 void (*input)(struct sk_buff *skb); 48 48 struct mutex *cb_mutex; 49 - int (*bind)(int group); 50 - void (*unbind)(int group); 49 + int (*bind)(struct net *net, int group); 50 + void (*unbind)(struct net *net, int group); 51 51 bool (*compare)(struct net *net, struct sock *sk); 52 52 }; 53 53
+3
include/linux/nfs_fs_sb.h
··· 74 74 /* idmapper */ 75 75 struct idmap * cl_idmap; 76 76 77 + /* Client owner identifier */ 78 + const char * cl_owner_id; 79 + 77 80 /* Our own IP address, as a null-terminated string. 78 81 * This is used to generate the mv0 callback address. 79 82 */
+6 -7
include/linux/pagemap.h
··· 251 251 #define FGP_NOWAIT 0x00000020 252 252 253 253 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 254 - int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask); 254 + int fgp_flags, gfp_t cache_gfp_mask); 255 255 256 256 /** 257 257 * find_get_page - find and get a page reference ··· 266 266 static inline struct page *find_get_page(struct address_space *mapping, 267 267 pgoff_t offset) 268 268 { 269 - return pagecache_get_page(mapping, offset, 0, 0, 0); 269 + return pagecache_get_page(mapping, offset, 0, 0); 270 270 } 271 271 272 272 static inline struct page *find_get_page_flags(struct address_space *mapping, 273 273 pgoff_t offset, int fgp_flags) 274 274 { 275 - return pagecache_get_page(mapping, offset, fgp_flags, 0, 0); 275 + return pagecache_get_page(mapping, offset, fgp_flags, 0); 276 276 } 277 277 278 278 /** ··· 292 292 static inline struct page *find_lock_page(struct address_space *mapping, 293 293 pgoff_t offset) 294 294 { 295 - return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0); 295 + return pagecache_get_page(mapping, offset, FGP_LOCK, 0); 296 296 } 297 297 298 298 /** ··· 319 319 { 320 320 return pagecache_get_page(mapping, offset, 321 321 FGP_LOCK|FGP_ACCESSED|FGP_CREAT, 322 - gfp_mask, gfp_mask & GFP_RECLAIM_MASK); 322 + gfp_mask); 323 323 } 324 324 325 325 /** ··· 340 340 { 341 341 return pagecache_get_page(mapping, index, 342 342 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, 343 - mapping_gfp_mask(mapping), 344 - GFP_NOFS); 343 + mapping_gfp_mask(mapping)); 345 344 } 346 345 347 346 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+7 -5
include/linux/perf_event.h
··· 79 79 struct perf_branch_entry entries[0]; 80 80 }; 81 81 82 - struct perf_regs { 83 - __u64 abi; 84 - struct pt_regs *regs; 85 - }; 86 - 87 82 struct task_struct; 88 83 89 84 /* ··· 605 610 u32 reserved; 606 611 } cpu_entry; 607 612 struct perf_callchain_entry *callchain; 613 + 614 + /* 615 + * regs_user may point to task_pt_regs or to regs_user_copy, depending 616 + * on arch details. 617 + */ 608 618 struct perf_regs regs_user; 619 + struct pt_regs regs_user_copy; 620 + 609 621 struct perf_regs regs_intr; 610 622 u64 stack_user_size; 611 623 } ____cacheline_aligned;
+16
include/linux/perf_regs.h
··· 1 1 #ifndef _LINUX_PERF_REGS_H 2 2 #define _LINUX_PERF_REGS_H 3 3 4 + struct perf_regs { 5 + __u64 abi; 6 + struct pt_regs *regs; 7 + }; 8 + 4 9 #ifdef CONFIG_HAVE_PERF_REGS 5 10 #include <asm/perf_regs.h> 6 11 u64 perf_reg_value(struct pt_regs *regs, int idx); 7 12 int perf_reg_validate(u64 mask); 8 13 u64 perf_reg_abi(struct task_struct *task); 14 + void perf_get_regs_user(struct perf_regs *regs_user, 15 + struct pt_regs *regs, 16 + struct pt_regs *regs_user_copy); 9 17 #else 10 18 static inline u64 perf_reg_value(struct pt_regs *regs, int idx) 11 19 { ··· 28 20 static inline u64 perf_reg_abi(struct task_struct *task) 29 21 { 30 22 return PERF_SAMPLE_REGS_ABI_NONE; 23 + } 24 + 25 + static inline void perf_get_regs_user(struct perf_regs *regs_user, 26 + struct pt_regs *regs, 27 + struct pt_regs *regs_user_copy) 28 + { 29 + regs_user->regs = task_pt_regs(current); 30 + regs_user->abi = perf_reg_abi(current); 31 31 } 32 32 #endif /* CONFIG_HAVE_PERF_REGS */ 33 33 #endif /* _LINUX_PERF_REGS_H */
+3 -3
include/linux/phy/omap_control_phy.h
··· 66 66 #define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0 67 67 68 68 #define OMAP_CTRL_PCIE_PCS_MASK 0xff 69 - #define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 0x8 69 + #define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16 70 70 71 71 #define OMAP_CTRL_USB2_PHY_PD BIT(28) 72 72 ··· 79 79 void omap_control_phy_power(struct device *dev, int on); 80 80 void omap_control_usb_set_mode(struct device *dev, 81 81 enum omap_control_usb_mode mode); 82 - void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay); 82 + void omap_control_pcie_pcs(struct device *dev, u8 delay); 83 83 #else 84 84 85 85 static inline void omap_control_phy_power(struct device *dev, int on) ··· 91 91 { 92 92 } 93 93 94 - static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) 94 + static inline void omap_control_pcie_pcs(struct device *dev, u8 delay) 95 95 { 96 96 } 97 97 #endif
+8
include/linux/pm_domain.h
··· 271 271 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, 272 272 void *data); 273 273 void of_genpd_del_provider(struct device_node *np); 274 + struct generic_pm_domain *of_genpd_get_from_provider( 275 + struct of_phandle_args *genpdspec); 274 276 275 277 struct generic_pm_domain *__of_genpd_xlate_simple( 276 278 struct of_phandle_args *genpdspec, ··· 289 287 return 0; 290 288 } 291 289 static inline void of_genpd_del_provider(struct device_node *np) {} 290 + 291 + static inline struct generic_pm_domain *of_genpd_get_from_provider( 292 + struct of_phandle_args *genpdspec) 293 + { 294 + return NULL; 295 + } 292 296 293 297 #define __of_genpd_xlate_simple NULL 294 298 #define __of_genpd_xlate_onecell NULL
+10
include/linux/rmap.h
··· 37 37 atomic_t refcount; 38 38 39 39 /* 40 + * Count of child anon_vmas and VMAs which points to this anon_vma. 41 + * 42 + * This counter is used for making decision about reusing anon_vma 43 + * instead of forking new one. See comments in function anon_vma_clone. 44 + */ 45 + unsigned degree; 46 + 47 + struct anon_vma *parent; /* Parent of this anon_vma */ 48 + 49 + /* 40 50 * NOTE: the LSB of the rb_root.rb_node is set by 41 51 * mm_take_all_locks() _after_ taking the above lock. So the 42 52 * rb_root must only be read/written after taking the above lock
+1 -1
include/linux/thermal.h
··· 38 38 #define THERMAL_CSTATE_INVALID -1UL 39 39 40 40 /* No upper/lower limit requirement */ 41 - #define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID 41 + #define THERMAL_NO_LIMIT ((u32)~0) 42 42 43 43 /* Unit conversion macros */ 44 44 #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
-1
include/linux/writeback.h
··· 177 177 struct writeback_control *wbc, writepage_t writepage, 178 178 void *data); 179 179 int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 180 - void set_page_dirty_balance(struct page *page); 181 180 void writeback_set_ratelimit(void); 182 181 void tag_pages_for_writeback(struct address_space *mapping, 183 182 pgoff_t start, pgoff_t end);
+7 -2
include/net/genetlink.h
··· 31 31 * do additional, common, filtering and return an error 32 32 * @post_doit: called after an operation's doit callback, it may 33 33 * undo operations done by pre_doit, for example release locks 34 + * @mcast_bind: a socket bound to the given multicast group (which 35 + * is given as the offset into the groups array) 36 + * @mcast_unbind: a socket was unbound from the given multicast group 34 37 * @attrbuf: buffer to store parsed attributes 35 38 * @family_list: family list 36 39 * @mcgrps: multicast groups used by this family (private) ··· 56 53 void (*post_doit)(const struct genl_ops *ops, 57 54 struct sk_buff *skb, 58 55 struct genl_info *info); 56 + int (*mcast_bind)(struct net *net, int group); 57 + void (*mcast_unbind)(struct net *net, int group); 59 58 struct nlattr ** attrbuf; /* private */ 60 59 const struct genl_ops * ops; /* private */ 61 60 const struct genl_multicast_group *mcgrps; /* private */ ··· 400 395 } 401 396 402 397 static inline int genl_has_listeners(struct genl_family *family, 403 - struct sock *sk, unsigned int group) 398 + struct net *net, unsigned int group) 404 399 { 405 400 if (WARN_ON_ONCE(group >= family->n_mcgrps)) 406 401 return -EINVAL; 407 402 group = family->mcgrp_offset + group; 408 - return netlink_has_listeners(sk, group); 403 + return netlink_has_listeners(net->genl_sock, group); 409 404 } 410 405 #endif /* __NET_GENERIC_NETLINK_H */
+2 -5
include/net/mac80211.h
··· 1270 1270 * 1271 1271 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the 1272 1272 * driver to indicate that it requires IV generation for this 1273 - * particular key. Setting this flag does not necessarily mean that SKBs 1274 - * will have sufficient tailroom for ICV or MIC. 1273 + * particular key. 1275 1274 * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by 1276 1275 * the driver for a TKIP key if it requires Michael MIC 1277 1276 * generation in software. ··· 1282 1283 * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver 1283 1284 * if space should be prepared for the IV, but the IV 1284 1285 * itself should not be generated. Do not set together with 1285 - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does 1286 - * not necessarily mean that SKBs will have sufficient tailroom for ICV or 1287 - * MIC. 1286 + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. 1288 1287 * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received 1289 1288 * management frames. The flag can help drivers that have a hardware 1290 1289 * crypto implementation that doesn't deal with management frames
-1
include/net/neighbour.h
··· 190 190 191 191 192 192 struct neigh_table { 193 - struct neigh_table *next; 194 193 int family; 195 194 int entry_size; 196 195 int key_len;
+24 -4
include/net/vxlan.h
··· 1 1 #ifndef __NET_VXLAN_H 2 2 #define __NET_VXLAN_H 1 3 3 4 + #include <linux/ip.h> 5 + #include <linux/ipv6.h> 6 + #include <linux/if_vlan.h> 4 7 #include <linux/skbuff.h> 5 8 #include <linux/netdevice.h> 6 9 #include <linux/udp.h> ··· 54 51 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 55 52 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); 56 53 57 - static inline bool vxlan_gso_check(struct sk_buff *skb) 54 + static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, 55 + netdev_features_t features) 58 56 { 59 - if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) && 57 + u8 l4_hdr = 0; 58 + 59 + if (!skb->encapsulation) 60 + return features; 61 + 62 + switch (vlan_get_protocol(skb)) { 63 + case htons(ETH_P_IP): 64 + l4_hdr = ip_hdr(skb)->protocol; 65 + break; 66 + case htons(ETH_P_IPV6): 67 + l4_hdr = ipv6_hdr(skb)->nexthdr; 68 + break; 69 + default: 70 + return features;; 71 + } 72 + 73 + if ((l4_hdr == IPPROTO_UDP) && 60 74 (skb->inner_protocol_type != ENCAP_TYPE_ETHER || 61 75 skb->inner_protocol != htons(ETH_P_TEB) || 62 76 (skb_inner_mac_header(skb) - skb_transport_header(skb) != 63 77 sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) 64 - return false; 78 + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); 65 79 66 - return true; 80 + return features; 67 81 } 68 82 69 83 /* IP header + UDP + VXLAN + Ethernet header */
+5 -5
include/sound/pcm.h
··· 857 857 } 858 858 859 859 /** 860 - * params_channels - Get the sample rate from the hw params 860 + * params_rate - Get the sample rate from the hw params 861 861 * @p: hw params 862 862 */ 863 863 static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) ··· 866 866 } 867 867 868 868 /** 869 - * params_channels - Get the period size (in frames) from the hw params 869 + * params_period_size - Get the period size (in frames) from the hw params 870 870 * @p: hw params 871 871 */ 872 872 static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) ··· 875 875 } 876 876 877 877 /** 878 - * params_channels - Get the number of periods from the hw params 878 + * params_periods - Get the number of periods from the hw params 879 879 * @p: hw params 880 880 */ 881 881 static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) ··· 884 884 } 885 885 886 886 /** 887 - * params_channels - Get the buffer size (in frames) from the hw params 887 + * params_buffer_size - Get the buffer size (in frames) from the hw params 888 888 * @p: hw params 889 889 */ 890 890 static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) ··· 893 893 } 894 894 895 895 /** 896 - * params_channels - Get the buffer size (in bytes) from the hw params 896 + * params_buffer_bytes - Get the buffer size (in bytes) from the hw params 897 897 * @p: hw params 898 898 */ 899 899 static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
-1
include/target/target_core_backend.h
··· 135 135 int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 136 136 int se_dev_set_queue_depth(struct se_device *, u32); 137 137 int se_dev_set_max_sectors(struct se_device *, u32); 138 - int se_dev_set_fabric_max_sectors(struct se_device *, u32); 139 138 int se_dev_set_optimal_sectors(struct se_device *, u32); 140 139 int se_dev_set_block_size(struct se_device *, u32); 141 140
-2
include/target/target_core_backend_configfs.h
··· 98 98 TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ 99 99 DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ 100 100 TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ 101 - DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \ 102 - TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \ 103 101 DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ 104 102 TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ 105 103 DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
-3
include/target/target_core_base.h
··· 77 77 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 78 78 /* Default max_write_same_len, disabled by default */ 79 79 #define DA_MAX_WRITE_SAME_LEN 0 80 - /* Default max transfer length */ 81 - #define DA_FABRIC_MAX_SECTORS 8192 82 80 /* Use a model alias based on the configfs backend device name */ 83 81 #define DA_EMULATE_MODEL_ALIAS 0 84 82 /* Emulation for Direct Page Out */ ··· 692 694 u32 hw_block_size; 693 695 u32 block_size; 694 696 u32 hw_max_sectors; 695 - u32 fabric_max_sectors; 696 697 u32 optimal_sectors; 697 698 u32 hw_queue_depth; 698 699 u32 queue_depth;
+1 -1
include/uapi/asm-generic/fcntl.h
··· 5 5 6 6 /* 7 7 * FMODE_EXEC is 0x20 8 - * FMODE_NONOTIFY is 0x1000000 8 + * FMODE_NONOTIFY is 0x4000000 9 9 * These cannot be used by userspace O_* until internal and external open 10 10 * flags are split. 11 11 * -Eric Paris
+2 -1
include/uapi/linux/in6.h
··· 149 149 /* 150 150 * IPV6 socket options 151 151 */ 152 - 152 + #if __UAPI_DEF_IPV6_OPTIONS 153 153 #define IPV6_ADDRFORM 1 154 154 #define IPV6_2292PKTINFO 2 155 155 #define IPV6_2292HOPOPTS 3 ··· 196 196 197 197 #define IPV6_IPSEC_POLICY 34 198 198 #define IPV6_XFRM_POLICY 35 199 + #endif 199 200 200 201 /* 201 202 * Multicast:
+22 -15
include/uapi/linux/kfd_ioctl.h
··· 128 128 uint32_t pad;
129 129 };
130 130 
131 - #define KFD_IOC_MAGIC 'K'
131 + #define AMDKFD_IOCTL_BASE 'K'
132 + #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
133 + #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
134 + #define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
135 + #define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
132 136 
133 - #define KFD_IOC_GET_VERSION \
134 - _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
137 + #define AMDKFD_IOC_GET_VERSION \
138 + AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
135 139 
136 - #define KFD_IOC_CREATE_QUEUE \
137 - _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
140 + #define AMDKFD_IOC_CREATE_QUEUE \
141 + AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
138 142 
139 - #define KFD_IOC_DESTROY_QUEUE \
140 - _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
143 + #define AMDKFD_IOC_DESTROY_QUEUE \
144 + AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
141 145 
142 - #define KFD_IOC_SET_MEMORY_POLICY \
143 - _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
146 + #define AMDKFD_IOC_SET_MEMORY_POLICY \
147 + AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
144 148 
145 - #define KFD_IOC_GET_CLOCK_COUNTERS \
146 - _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
149 + #define AMDKFD_IOC_GET_CLOCK_COUNTERS \
150 + AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
147 151 
148 - #define KFD_IOC_GET_PROCESS_APERTURES \
149 - _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
152 + #define AMDKFD_IOC_GET_PROCESS_APERTURES \
153 + AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
150 154 
151 - #define KFD_IOC_UPDATE_QUEUE \
152 - _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
155 + #define AMDKFD_IOC_UPDATE_QUEUE \
156 + AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
157 + 
158 + #define AMDKFD_COMMAND_START 0x01
159 + #define AMDKFD_COMMAND_END 0x08
153 160 
154 161 #endif
+3
include/uapi/linux/libc-compat.h
··· 69 69 #define __UAPI_DEF_SOCKADDR_IN6 0 70 70 #define __UAPI_DEF_IPV6_MREQ 0 71 71 #define __UAPI_DEF_IPPROTO_V6 0 72 + #define __UAPI_DEF_IPV6_OPTIONS 0 72 73 73 74 #else 74 75 ··· 83 82 #define __UAPI_DEF_SOCKADDR_IN6 1 84 83 #define __UAPI_DEF_IPV6_MREQ 1 85 84 #define __UAPI_DEF_IPPROTO_V6 1 85 + #define __UAPI_DEF_IPV6_OPTIONS 1 86 86 87 87 #endif /* _NETINET_IN_H */ 88 88 ··· 105 103 #define __UAPI_DEF_SOCKADDR_IN6 1 106 104 #define __UAPI_DEF_IPV6_MREQ 1 107 105 #define __UAPI_DEF_IPPROTO_V6 1 106 + #define __UAPI_DEF_IPV6_OPTIONS 1 108 107 109 108 /* Definitions for xattr.h */ 110 109 #define __UAPI_DEF_XATTR 1
+4
include/uapi/linux/openvswitch.h
··· 174 174 OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ 175 175 OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* 176 176 attributes. */ 177 + OVS_PACKET_ATTR_UNUSED1, 178 + OVS_PACKET_ATTR_UNUSED2, 179 + OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe, 180 + error logging should be suppressed. */ 177 181 __OVS_PACKET_ATTR_MAX 178 182 }; 179 183
+7
include/uapi/linux/virtio_ring.h
··· 101 101 struct vring_used *used; 102 102 }; 103 103 104 + /* Alignment requirements for vring elements. 105 + * When using pre-virtio 1.0 layout, these fall out naturally. 106 + */ 107 + #define VRING_AVAIL_ALIGN_SIZE 2 108 + #define VRING_USED_ALIGN_SIZE 4 109 + #define VRING_DESC_ALIGN_SIZE 16 110 + 104 111 /* The standard layout for the ring is a continuous chunk of memory which looks 105 112 * like this. We assume num is a power of 2. 106 113 *
+51
include/xen/interface/nmi.h
··· 1 + /****************************************************************************** 2 + * nmi.h 3 + * 4 + * NMI callback registration and reason codes. 5 + * 6 + * Copyright (c) 2005, Keir Fraser <keir@xensource.com> 7 + */ 8 + 9 + #ifndef __XEN_PUBLIC_NMI_H__ 10 + #define __XEN_PUBLIC_NMI_H__ 11 + 12 + #include <xen/interface/xen.h> 13 + 14 + /* 15 + * NMI reason codes: 16 + * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. 17 + */ 18 + /* I/O-check error reported via ISA port 0x61, bit 6. */ 19 + #define _XEN_NMIREASON_io_error 0 20 + #define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) 21 + /* PCI SERR reported via ISA port 0x61, bit 7. */ 22 + #define _XEN_NMIREASON_pci_serr 1 23 + #define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr) 24 + /* Unknown hardware-generated NMI. */ 25 + #define _XEN_NMIREASON_unknown 2 26 + #define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) 27 + 28 + /* 29 + * long nmi_op(unsigned int cmd, void *arg) 30 + * NB. All ops return zero on success, else a negative error code. 31 + */ 32 + 33 + /* 34 + * Register NMI callback for this (calling) VCPU. Currently this only makes 35 + * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. 36 + * arg == pointer to xennmi_callback structure. 37 + */ 38 + #define XENNMI_register_callback 0 39 + struct xennmi_callback { 40 + unsigned long handler_address; 41 + unsigned long pad; 42 + }; 43 + DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback); 44 + 45 + /* 46 + * Deregister NMI callback for this (calling) VCPU. 47 + * arg == NULL. 48 + */ 49 + #define XENNMI_unregister_callback 1 50 + 51 + #endif /* __XEN_PUBLIC_NMI_H__ */
+1 -1
kernel/audit.c
··· 1100 1100 } 1101 1101 1102 1102 /* Run custom bind function on netlink socket group connect or bind requests. */ 1103 - static int audit_bind(int group) 1103 + static int audit_bind(struct net *net, int group) 1104 1104 { 1105 1105 if (!capable(CAP_AUDIT_READ)) 1106 1106 return -EPERM;
+40 -9
kernel/auditsc.c
··· 72 72 #include <linux/fs_struct.h> 73 73 #include <linux/compat.h> 74 74 #include <linux/ctype.h> 75 + #include <linux/string.h> 76 + #include <uapi/linux/limits.h> 75 77 76 78 #include "audit.h" 77 79 ··· 1863 1861 } 1864 1862 1865 1863 list_for_each_entry_reverse(n, &context->names_list, list) { 1866 - /* does the name pointer match? */ 1867 - if (!n->name || n->name->name != name->name) 1864 + if (!n->name || strcmp(n->name->name, name->name)) 1868 1865 continue; 1869 1866 1870 1867 /* match the correct record type */ ··· 1882 1881 n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); 1883 1882 if (!n) 1884 1883 return; 1885 - if (name) 1886 - /* since name is not NULL we know there is already a matching 1887 - * name record, see audit_getname(), so there must be a type 1888 - * mismatch; reuse the string path since the original name 1889 - * record will keep the string valid until we free it in 1890 - * audit_free_names() */ 1891 - n->name = name; 1884 + /* unfortunately, while we may have a path name to record with the 1885 + * inode, we can't always rely on the string lasting until the end of 1886 + * the syscall so we need to create our own copy, it may fail due to 1887 + * memory allocation issues, but we do our best */ 1888 + if (name) { 1889 + /* we can't use getname_kernel() due to size limits */ 1890 + size_t len = strlen(name->name) + 1; 1891 + struct filename *new = __getname(); 1892 1892 1893 + if (unlikely(!new)) 1894 + goto out; 1895 + 1896 + if (len <= (PATH_MAX - sizeof(*new))) { 1897 + new->name = (char *)(new) + sizeof(*new); 1898 + new->separate = false; 1899 + } else if (len <= PATH_MAX) { 1900 + /* this looks odd, but is due to final_putname() */ 1901 + struct filename *new2; 1902 + 1903 + new2 = kmalloc(sizeof(*new2), GFP_KERNEL); 1904 + if (unlikely(!new2)) { 1905 + __putname(new); 1906 + goto out; 1907 + } 1908 + new2->name = (char *)new; 1909 + new2->separate = true; 1910 + new = new2; 1911 + } else { 1912 + /* we should never get here, 
but let's be safe */ 1913 + __putname(new); 1914 + goto out; 1915 + } 1916 + strlcpy((char *)new->name, name->name, len); 1917 + new->uptr = NULL; 1918 + new->aname = n; 1919 + n->name = new; 1920 + n->name_put = true; 1921 + } 1893 1922 out: 1894 1923 if (parent) { 1895 1924 n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
+28 -24
kernel/debug/debug_core.c
··· 27 27 * version 2. This program is licensed "as is" without any warranty of any 28 28 * kind, whether express or implied. 29 29 */ 30 + 31 + #define pr_fmt(fmt) "KGDB: " fmt 32 + 30 33 #include <linux/pid_namespace.h> 31 34 #include <linux/clocksource.h> 32 35 #include <linux/serial_core.h> ··· 199 196 return err; 200 197 err = kgdb_arch_remove_breakpoint(&tmp); 201 198 if (err) 202 - printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " 203 - "memory destroyed at: %lx", addr); 199 + pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n", 200 + addr); 204 201 return err; 205 202 } 206 203 ··· 259 256 error = kgdb_arch_set_breakpoint(&kgdb_break[i]); 260 257 if (error) { 261 258 ret = error; 262 - printk(KERN_INFO "KGDB: BP install failed: %lx", 263 - kgdb_break[i].bpt_addr); 259 + pr_info("BP install failed: %lx\n", 260 + kgdb_break[i].bpt_addr); 264 261 continue; 265 262 } 266 263 ··· 322 319 continue; 323 320 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); 324 321 if (error) { 325 - printk(KERN_INFO "KGDB: BP remove failed: %lx\n", 326 - kgdb_break[i].bpt_addr); 322 + pr_info("BP remove failed: %lx\n", 323 + kgdb_break[i].bpt_addr); 327 324 ret = error; 328 325 } 329 326 ··· 370 367 goto setundefined; 371 368 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); 372 369 if (error) 373 - printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", 370 + pr_err("breakpoint remove failed: %lx\n", 374 371 kgdb_break[i].bpt_addr); 375 372 setundefined: 376 373 kgdb_break[i].state = BP_UNDEFINED; ··· 403 400 if (print_wait) { 404 401 #ifdef CONFIG_KGDB_KDB 405 402 if (!dbg_kdb_mode) 406 - printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); 403 + pr_crit("waiting... 
or $3#33 for KDB\n"); 407 404 #else 408 - printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); 405 + pr_crit("Waiting for remote debugger\n"); 409 406 #endif 410 407 } 411 408 return 1; ··· 433 430 exception_level = 0; 434 431 kgdb_skipexception(ks->ex_vector, ks->linux_regs); 435 432 dbg_activate_sw_breakpoints(); 436 - printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", 437 - addr); 433 + pr_crit("re-enter error: breakpoint removed %lx\n", addr); 438 434 WARN_ON_ONCE(1); 439 435 440 436 return 1; ··· 446 444 panic("Recursive entry to debugger"); 447 445 } 448 446 449 - printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); 447 + pr_crit("re-enter exception: ALL breakpoints killed\n"); 450 448 #ifdef CONFIG_KGDB_KDB 451 449 /* Allow kdb to debug itself one level */ 452 450 return 0; ··· 473 471 int cpu; 474 472 int trace_on = 0; 475 473 int online_cpus = num_online_cpus(); 474 + u64 time_left; 476 475 477 476 kgdb_info[ks->cpu].enter_kgdb++; 478 477 kgdb_info[ks->cpu].exception_state |= exception_state; ··· 598 595 /* 599 596 * Wait for the other CPUs to be notified and be waiting for us: 600 597 */ 601 - while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + 602 - atomic_read(&slaves_in_kgdb)) != online_cpus) 598 + time_left = loops_per_jiffy * HZ; 599 + while (kgdb_do_roundup && --time_left && 600 + (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) != 601 + online_cpus) 603 602 cpu_relax(); 603 + if (!time_left) 604 + pr_crit("KGDB: Timed out waiting for secondary CPUs.\n"); 604 605 605 606 /* 606 607 * At this point the primary processor is completely ··· 802 795 static void sysrq_handle_dbg(int key) 803 796 { 804 797 if (!dbg_io_ops) { 805 - printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); 798 + pr_crit("ERROR: No KGDB I/O module available\n"); 806 799 return; 807 800 } 808 801 if (!kgdb_connected) { 809 802 #ifdef CONFIG_KGDB_KDB 810 803 if (!dbg_kdb_mode) 811 - printk(KERN_CRIT "KGDB or $3#33 for 
KDB\n"); 804 + pr_crit("KGDB or $3#33 for KDB\n"); 812 805 #else 813 - printk(KERN_CRIT "Entering KGDB\n"); 806 + pr_crit("Entering KGDB\n"); 814 807 #endif 815 808 } 816 809 ··· 952 945 { 953 946 kgdb_break_asap = 0; 954 947 955 - printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); 948 + pr_crit("Waiting for connection from remote gdb...\n"); 956 949 kgdb_breakpoint(); 957 950 } 958 951 ··· 971 964 if (dbg_io_ops) { 972 965 spin_unlock(&kgdb_registration_lock); 973 966 974 - printk(KERN_ERR "kgdb: Another I/O driver is already " 975 - "registered with KGDB.\n"); 967 + pr_err("Another I/O driver is already registered with KGDB\n"); 976 968 return -EBUSY; 977 969 } 978 970 ··· 987 981 988 982 spin_unlock(&kgdb_registration_lock); 989 983 990 - printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", 991 - new_dbg_io_ops->name); 984 + pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name); 992 985 993 986 /* Arm KGDB now. */ 994 987 kgdb_register_callbacks(); ··· 1022 1017 1023 1018 spin_unlock(&kgdb_registration_lock); 1024 1019 1025 - printk(KERN_INFO 1026 - "kgdb: Unregistered I/O driver %s, debugger disabled.\n", 1020 + pr_info("Unregistered I/O driver %s, debugger disabled\n", 1027 1021 old_dbg_io_ops->name); 1028 1022 } 1029 1023 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
+21 -14
kernel/debug/kdb/kdb_bp.c
··· 531 531 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) 532 532 bp->bp_free = 1; 533 533 534 - kdb_register_repeat("bp", kdb_bp, "[<vaddr>]", 535 - "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS); 536 - kdb_register_repeat("bl", kdb_bp, "[<vaddr>]", 537 - "Display breakpoints", 0, KDB_REPEAT_NO_ARGS); 534 + kdb_register_flags("bp", kdb_bp, "[<vaddr>]", 535 + "Set/Display breakpoints", 0, 536 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 537 + kdb_register_flags("bl", kdb_bp, "[<vaddr>]", 538 + "Display breakpoints", 0, 539 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 538 540 if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) 539 - kdb_register_repeat("bph", kdb_bp, "[<vaddr>]", 540 - "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS); 541 - kdb_register_repeat("bc", kdb_bc, "<bpnum>", 542 - "Clear Breakpoint", 0, KDB_REPEAT_NONE); 543 - kdb_register_repeat("be", kdb_bc, "<bpnum>", 544 - "Enable Breakpoint", 0, KDB_REPEAT_NONE); 545 - kdb_register_repeat("bd", kdb_bc, "<bpnum>", 546 - "Disable Breakpoint", 0, KDB_REPEAT_NONE); 541 + kdb_register_flags("bph", kdb_bp, "[<vaddr>]", 542 + "[datar [length]|dataw [length]] Set hw brk", 0, 543 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 544 + kdb_register_flags("bc", kdb_bc, "<bpnum>", 545 + "Clear Breakpoint", 0, 546 + KDB_ENABLE_FLOW_CTRL); 547 + kdb_register_flags("be", kdb_bc, "<bpnum>", 548 + "Enable Breakpoint", 0, 549 + KDB_ENABLE_FLOW_CTRL); 550 + kdb_register_flags("bd", kdb_bc, "<bpnum>", 551 + "Disable Breakpoint", 0, 552 + KDB_ENABLE_FLOW_CTRL); 547 553 548 - kdb_register_repeat("ss", kdb_ss, "", 549 - "Single Step", 1, KDB_REPEAT_NO_ARGS); 554 + kdb_register_flags("ss", kdb_ss, "", 555 + "Single Step", 1, 556 + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); 550 557 /* 551 558 * Architecture dependent initialization. 552 559 */
+4
kernel/debug/kdb/kdb_debugger.c
··· 129 129 ks->pass_exception = 1; 130 130 KDB_FLAG_SET(CATASTROPHIC); 131 131 } 132 + /* set CATASTROPHIC if the system contains unresponsive processors */ 133 + for_each_online_cpu(i) 134 + if (!kgdb_info[i].enter_kgdb) 135 + KDB_FLAG_SET(CATASTROPHIC); 132 136 if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { 133 137 KDB_STATE_CLEAR(SSBPT); 134 138 KDB_STATE_CLEAR(DOING_SS);
+170 -95
kernel/debug/kdb/kdb_main.c
··· 12 12 */ 13 13 14 14 #include <linux/ctype.h> 15 + #include <linux/types.h> 15 16 #include <linux/string.h> 16 17 #include <linux/kernel.h> 17 18 #include <linux/kmsg_dump.h> ··· 24 23 #include <linux/vmalloc.h> 25 24 #include <linux/atomic.h> 26 25 #include <linux/module.h> 26 + #include <linux/moduleparam.h> 27 27 #include <linux/mm.h> 28 28 #include <linux/init.h> 29 29 #include <linux/kallsyms.h> ··· 43 41 #include <linux/uaccess.h> 44 42 #include <linux/slab.h> 45 43 #include "kdb_private.h" 44 + 45 + #undef MODULE_PARAM_PREFIX 46 + #define MODULE_PARAM_PREFIX "kdb." 47 + 48 + static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE; 49 + module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600); 46 50 47 51 #define GREP_LEN 256 48 52 char kdb_grep_string[GREP_LEN]; ··· 129 121 KDBMSG(BADLENGTH, "Invalid length field"), 130 122 KDBMSG(NOBP, "No Breakpoint exists"), 131 123 KDBMSG(BADADDR, "Invalid address"), 124 + KDBMSG(NOPERM, "Permission denied"), 132 125 }; 133 126 #undef KDBMSG 134 127 ··· 194 185 p = krp->p; 195 186 #endif 196 187 return p; 188 + } 189 + 190 + /* 191 + * Check whether the flags of the current command and the permissions 192 + * of the kdb console has allow a command to be run. 193 + */ 194 + static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, 195 + bool no_args) 196 + { 197 + /* permissions comes from userspace so needs massaging slightly */ 198 + permissions &= KDB_ENABLE_MASK; 199 + permissions |= KDB_ENABLE_ALWAYS_SAFE; 200 + 201 + /* some commands change group when launched with no arguments */ 202 + if (no_args) 203 + permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT; 204 + 205 + flags |= KDB_ENABLE_ALL; 206 + 207 + return permissions & flags; 197 208 } 198 209 199 210 /* ··· 505 476 kdb_symtab_t symtab; 506 477 507 478 /* 479 + * If the enable flags prohibit both arbitrary memory access 480 + * and flow control then there are no reasonable grounds to 481 + * provide symbol lookup. 
482 + */ 483 + if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL, 484 + kdb_cmd_enabled, false)) 485 + return KDB_NOPERM; 486 + 487 + /* 508 488 * Process arguments which follow the following syntax: 509 489 * 510 490 * symbol | numeric-address [+/- numeric-offset] ··· 679 641 if (!s->count) 680 642 s->usable = 0; 681 643 if (s->usable) 682 - kdb_register(s->name, kdb_exec_defcmd, 683 - s->usage, s->help, 0); 644 + /* macros are always safe because when executed each 645 + * internal command re-enters kdb_parse() and is 646 + * safety checked individually. 647 + */ 648 + kdb_register_flags(s->name, kdb_exec_defcmd, s->usage, 649 + s->help, 0, 650 + KDB_ENABLE_ALWAYS_SAFE); 684 651 return 0; 685 652 } 686 653 if (!s->usable) ··· 1046 1003 1047 1004 if (i < kdb_max_commands) { 1048 1005 int result; 1006 + 1007 + if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1)) 1008 + return KDB_NOPERM; 1009 + 1049 1010 KDB_STATE_SET(CMD); 1050 1011 result = (*tp->cmd_func)(argc-1, (const char **)argv); 1051 1012 if (result && ignore_errors && result > KDB_CMD_GO) 1052 1013 result = 0; 1053 1014 KDB_STATE_CLEAR(CMD); 1054 - switch (tp->cmd_repeat) { 1055 - case KDB_REPEAT_NONE: 1056 - argc = 0; 1057 - if (argv[0]) 1058 - *(argv[0]) = '\0'; 1059 - break; 1060 - case KDB_REPEAT_NO_ARGS: 1061 - argc = 1; 1062 - if (argv[1]) 1063 - *(argv[1]) = '\0'; 1064 - break; 1065 - case KDB_REPEAT_WITH_ARGS: 1066 - break; 1067 - } 1015 + 1016 + if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS) 1017 + return result; 1018 + 1019 + argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 
1 : 0; 1020 + if (argv[argc]) 1021 + *(argv[argc]) = '\0'; 1068 1022 return result; 1069 1023 } 1070 1024 ··· 1961 1921 */ 1962 1922 static int kdb_sr(int argc, const char **argv) 1963 1923 { 1924 + bool check_mask = 1925 + !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false); 1926 + 1964 1927 if (argc != 1) 1965 1928 return KDB_ARGCOUNT; 1929 + 1966 1930 kdb_trap_printk++; 1967 - __handle_sysrq(*argv[1], false); 1931 + __handle_sysrq(*argv[1], check_mask); 1968 1932 kdb_trap_printk--; 1969 1933 1970 1934 return 0; ··· 2201 2157 for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { 2202 2158 if (!cpu_online(i)) { 2203 2159 state = 'F'; /* cpu is offline */ 2160 + } else if (!kgdb_info[i].enter_kgdb) { 2161 + state = 'D'; /* cpu is online but unresponsive */ 2204 2162 } else { 2205 2163 state = ' '; /* cpu is responding to kdb */ 2206 2164 if (kdb_task_state_char(KDB_TSK(i)) == 'I') ··· 2256 2210 /* 2257 2211 * Validate cpunum 2258 2212 */ 2259 - if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) 2213 + if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) 2260 2214 return KDB_BADCPUNUM; 2261 2215 2262 2216 dbg_switch_cpu = cpunum; ··· 2420 2374 if (KDB_FLAG(CMD_INTERRUPT)) 2421 2375 return 0; 2422 2376 if (!kt->cmd_name) 2377 + continue; 2378 + if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true)) 2423 2379 continue; 2424 2380 if (strlen(kt->cmd_usage) > 20) 2425 2381 space = "\n "; ··· 2677 2629 } 2678 2630 2679 2631 /* 2680 - * kdb_register_repeat - This function is used to register a kernel 2632 + * kdb_register_flags - This function is used to register a kernel 2681 2633 * debugger command. 2682 2634 * Inputs: 2683 2635 * cmd Command name ··· 2689 2641 * zero for success, one if a duplicate command. 
2690 2642 */ 2691 2643 #define kdb_command_extend 50 /* arbitrary */ 2692 - int kdb_register_repeat(char *cmd, 2693 - kdb_func_t func, 2694 - char *usage, 2695 - char *help, 2696 - short minlen, 2697 - kdb_repeat_t repeat) 2644 + int kdb_register_flags(char *cmd, 2645 + kdb_func_t func, 2646 + char *usage, 2647 + char *help, 2648 + short minlen, 2649 + kdb_cmdflags_t flags) 2698 2650 { 2699 2651 int i; 2700 2652 kdbtab_t *kp; ··· 2742 2694 kp->cmd_func = func; 2743 2695 kp->cmd_usage = usage; 2744 2696 kp->cmd_help = help; 2745 - kp->cmd_flags = 0; 2746 2697 kp->cmd_minlen = minlen; 2747 - kp->cmd_repeat = repeat; 2698 + kp->cmd_flags = flags; 2748 2699 2749 2700 return 0; 2750 2701 } 2751 - EXPORT_SYMBOL_GPL(kdb_register_repeat); 2702 + EXPORT_SYMBOL_GPL(kdb_register_flags); 2752 2703 2753 2704 2754 2705 /* 2755 2706 * kdb_register - Compatibility register function for commands that do 2756 2707 * not need to specify a repeat state. Equivalent to 2757 - * kdb_register_repeat with KDB_REPEAT_NONE. 2708 + * kdb_register_flags with flags set to 0. 2758 2709 * Inputs: 2759 2710 * cmd Command name 2760 2711 * func Function to execute the command ··· 2768 2721 char *help, 2769 2722 short minlen) 2770 2723 { 2771 - return kdb_register_repeat(cmd, func, usage, help, minlen, 2772 - KDB_REPEAT_NONE); 2724 + return kdb_register_flags(cmd, func, usage, help, minlen, 0); 2773 2725 } 2774 2726 EXPORT_SYMBOL_GPL(kdb_register); 2775 2727 ··· 2810 2764 for_each_kdbcmd(kp, i) 2811 2765 kp->cmd_name = NULL; 2812 2766 2813 - kdb_register_repeat("md", kdb_md, "<vaddr>", 2767 + kdb_register_flags("md", kdb_md, "<vaddr>", 2814 2768 "Display Memory Contents, also mdWcN, e.g. 
md8c1", 1, 2815 - KDB_REPEAT_NO_ARGS); 2816 - kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", 2817 - "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); 2818 - kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", 2819 - "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); 2820 - kdb_register_repeat("mds", kdb_md, "<vaddr>", 2821 - "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); 2822 - kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", 2823 - "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); 2824 - kdb_register_repeat("go", kdb_go, "[<vaddr>]", 2825 - "Continue Execution", 1, KDB_REPEAT_NONE); 2826 - kdb_register_repeat("rd", kdb_rd, "", 2827 - "Display Registers", 0, KDB_REPEAT_NONE); 2828 - kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", 2829 - "Modify Registers", 0, KDB_REPEAT_NONE); 2830 - kdb_register_repeat("ef", kdb_ef, "<vaddr>", 2831 - "Display exception frame", 0, KDB_REPEAT_NONE); 2832 - kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", 2833 - "Stack traceback", 1, KDB_REPEAT_NONE); 2834 - kdb_register_repeat("btp", kdb_bt, "<pid>", 2835 - "Display stack for process <pid>", 0, KDB_REPEAT_NONE); 2836 - kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", 2837 - "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE); 2838 - kdb_register_repeat("btc", kdb_bt, "", 2839 - "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); 2840 - kdb_register_repeat("btt", kdb_bt, "<vaddr>", 2769 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2770 + kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>", 2771 + "Display Raw Memory", 0, 2772 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2773 + kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>", 2774 + "Display Physical Memory", 0, 2775 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2776 + kdb_register_flags("mds", kdb_md, "<vaddr>", 2777 + "Display Memory Symbolically", 0, 2778 + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); 2779 + kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>", 2780 + 
"Modify Memory Contents", 0, 2781 + KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS); 2782 + kdb_register_flags("go", kdb_go, "[<vaddr>]", 2783 + "Continue Execution", 1, 2784 + KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); 2785 + kdb_register_flags("rd", kdb_rd, "", 2786 + "Display Registers", 0, 2787 + KDB_ENABLE_REG_READ); 2788 + kdb_register_flags("rm", kdb_rm, "<reg> <contents>", 2789 + "Modify Registers", 0, 2790 + KDB_ENABLE_REG_WRITE); 2791 + kdb_register_flags("ef", kdb_ef, "<vaddr>", 2792 + "Display exception frame", 0, 2793 + KDB_ENABLE_MEM_READ); 2794 + kdb_register_flags("bt", kdb_bt, "[<vaddr>]", 2795 + "Stack traceback", 1, 2796 + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); 2797 + kdb_register_flags("btp", kdb_bt, "<pid>", 2798 + "Display stack for process <pid>", 0, 2799 + KDB_ENABLE_INSPECT); 2800 + kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", 2801 + "Backtrace all processes matching state flag", 0, 2802 + KDB_ENABLE_INSPECT); 2803 + kdb_register_flags("btc", kdb_bt, "", 2804 + "Backtrace current process on each cpu", 0, 2805 + KDB_ENABLE_INSPECT); 2806 + kdb_register_flags("btt", kdb_bt, "<vaddr>", 2841 2807 "Backtrace process given its struct task address", 0, 2842 - KDB_REPEAT_NONE); 2843 - kdb_register_repeat("env", kdb_env, "", 2844 - "Show environment variables", 0, KDB_REPEAT_NONE); 2845 - kdb_register_repeat("set", kdb_set, "", 2846 - "Set environment variables", 0, KDB_REPEAT_NONE); 2847 - kdb_register_repeat("help", kdb_help, "", 2848 - "Display Help Message", 1, KDB_REPEAT_NONE); 2849 - kdb_register_repeat("?", kdb_help, "", 2850 - "Display Help Message", 0, KDB_REPEAT_NONE); 2851 - kdb_register_repeat("cpu", kdb_cpu, "<cpunum>", 2852 - "Switch to new cpu", 0, KDB_REPEAT_NONE); 2853 - kdb_register_repeat("kgdb", kdb_kgdb, "", 2854 - "Enter kgdb mode", 0, KDB_REPEAT_NONE); 2855 - kdb_register_repeat("ps", kdb_ps, "[<flags>|A]", 2856 - "Display active task list", 0, KDB_REPEAT_NONE); 2857 - 
kdb_register_repeat("pid", kdb_pid, "<pidnum>", 2858 - "Switch to another task", 0, KDB_REPEAT_NONE); 2859 - kdb_register_repeat("reboot", kdb_reboot, "", 2860 - "Reboot the machine immediately", 0, KDB_REPEAT_NONE); 2808 + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); 2809 + kdb_register_flags("env", kdb_env, "", 2810 + "Show environment variables", 0, 2811 + KDB_ENABLE_ALWAYS_SAFE); 2812 + kdb_register_flags("set", kdb_set, "", 2813 + "Set environment variables", 0, 2814 + KDB_ENABLE_ALWAYS_SAFE); 2815 + kdb_register_flags("help", kdb_help, "", 2816 + "Display Help Message", 1, 2817 + KDB_ENABLE_ALWAYS_SAFE); 2818 + kdb_register_flags("?", kdb_help, "", 2819 + "Display Help Message", 0, 2820 + KDB_ENABLE_ALWAYS_SAFE); 2821 + kdb_register_flags("cpu", kdb_cpu, "<cpunum>", 2822 + "Switch to new cpu", 0, 2823 + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); 2824 + kdb_register_flags("kgdb", kdb_kgdb, "", 2825 + "Enter kgdb mode", 0, 0); 2826 + kdb_register_flags("ps", kdb_ps, "[<flags>|A]", 2827 + "Display active task list", 0, 2828 + KDB_ENABLE_INSPECT); 2829 + kdb_register_flags("pid", kdb_pid, "<pidnum>", 2830 + "Switch to another task", 0, 2831 + KDB_ENABLE_INSPECT); 2832 + kdb_register_flags("reboot", kdb_reboot, "", 2833 + "Reboot the machine immediately", 0, 2834 + KDB_ENABLE_REBOOT); 2861 2835 #if defined(CONFIG_MODULES) 2862 - kdb_register_repeat("lsmod", kdb_lsmod, "", 2863 - "List loaded kernel modules", 0, KDB_REPEAT_NONE); 2836 + kdb_register_flags("lsmod", kdb_lsmod, "", 2837 + "List loaded kernel modules", 0, 2838 + KDB_ENABLE_INSPECT); 2864 2839 #endif 2865 2840 #if defined(CONFIG_MAGIC_SYSRQ) 2866 - kdb_register_repeat("sr", kdb_sr, "<key>", 2867 - "Magic SysRq key", 0, KDB_REPEAT_NONE); 2841 + kdb_register_flags("sr", kdb_sr, "<key>", 2842 + "Magic SysRq key", 0, 2843 + KDB_ENABLE_ALWAYS_SAFE); 2868 2844 #endif 2869 2845 #if defined(CONFIG_PRINTK) 2870 - kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", 2871 - "Display syslog buffer", 0, KDB_REPEAT_NONE); 
2846 + kdb_register_flags("dmesg", kdb_dmesg, "[lines]", 2847 + "Display syslog buffer", 0, 2848 + KDB_ENABLE_ALWAYS_SAFE); 2872 2849 #endif 2873 2850 if (arch_kgdb_ops.enable_nmi) { 2874 - kdb_register_repeat("disable_nmi", kdb_disable_nmi, "", 2875 - "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE); 2851 + kdb_register_flags("disable_nmi", kdb_disable_nmi, "", 2852 + "Disable NMI entry to KDB", 0, 2853 + KDB_ENABLE_ALWAYS_SAFE); 2876 2854 } 2877 - kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", 2878 - "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); 2879 - kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", 2880 - "Send a signal to a process", 0, KDB_REPEAT_NONE); 2881 - kdb_register_repeat("summary", kdb_summary, "", 2882 - "Summarize the system", 4, KDB_REPEAT_NONE); 2883 - kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", 2884 - "Display per_cpu variables", 3, KDB_REPEAT_NONE); 2885 - kdb_register_repeat("grephelp", kdb_grep_help, "", 2886 - "Display help on | grep", 0, KDB_REPEAT_NONE); 2855 + kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"", 2856 + "Define a set of commands, down to endefcmd", 0, 2857 + KDB_ENABLE_ALWAYS_SAFE); 2858 + kdb_register_flags("kill", kdb_kill, "<-signal> <pid>", 2859 + "Send a signal to a process", 0, 2860 + KDB_ENABLE_SIGNAL); 2861 + kdb_register_flags("summary", kdb_summary, "", 2862 + "Summarize the system", 4, 2863 + KDB_ENABLE_ALWAYS_SAFE); 2864 + kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", 2865 + "Display per_cpu variables", 3, 2866 + KDB_ENABLE_MEM_READ); 2867 + kdb_register_flags("grephelp", kdb_grep_help, "", 2868 + "Display help on | grep", 0, 2869 + KDB_ENABLE_ALWAYS_SAFE); 2887 2870 } 2888 2871 2889 2872 /* Execute any commands defined in kdb_cmds. */
+1 -2
kernel/debug/kdb/kdb_private.h
··· 172 172 kdb_func_t cmd_func; /* Function to execute command */ 173 173 char *cmd_usage; /* Usage String for this command */ 174 174 char *cmd_help; /* Help message for this command */ 175 - short cmd_flags; /* Parsing flags */ 176 175 short cmd_minlen; /* Minimum legal # command 177 176 * chars required */ 178 - kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */ 177 + kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ 179 178 } kdbtab_t; 180 179 181 180 extern int kdb_bt(int, const char **); /* KDB display back trace */
+8 -11
kernel/events/core.c
··· 4461 4461 } 4462 4462 4463 4463 static void perf_sample_regs_user(struct perf_regs *regs_user, 4464 - struct pt_regs *regs) 4464 + struct pt_regs *regs, 4465 + struct pt_regs *regs_user_copy) 4465 4466 { 4466 - if (!user_mode(regs)) { 4467 - if (current->mm) 4468 - regs = task_pt_regs(current); 4469 - else 4470 - regs = NULL; 4471 - } 4472 - 4473 - if (regs) { 4474 - regs_user->abi = perf_reg_abi(current); 4467 + if (user_mode(regs)) { 4468 + regs_user->abi = perf_reg_abi(current); 4475 4469 regs_user->regs = regs; 4470 + } else if (current->mm) { 4471 + perf_get_regs_user(regs_user, regs, regs_user_copy); 4476 4472 } else { 4477 4473 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 4478 4474 regs_user->regs = NULL; ··· 4947 4951 } 4948 4952 4949 4953 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) 4950 - perf_sample_regs_user(&data->regs_user, regs); 4954 + perf_sample_regs_user(&data->regs_user, regs, 4955 + &data->regs_user_copy); 4951 4956 4952 4957 if (sample_type & PERF_SAMPLE_REGS_USER) { 4953 4958 /* regs dump ABI info */
+9 -3
kernel/exit.c
··· 1287 1287 static int wait_consider_task(struct wait_opts *wo, int ptrace, 1288 1288 struct task_struct *p) 1289 1289 { 1290 + /* 1291 + * We can race with wait_task_zombie() from another thread. 1292 + * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition 1293 + * can't confuse the checks below. 1294 + */ 1295 + int exit_state = ACCESS_ONCE(p->exit_state); 1290 1296 int ret; 1291 1297 1292 - if (unlikely(p->exit_state == EXIT_DEAD)) 1298 + if (unlikely(exit_state == EXIT_DEAD)) 1293 1299 return 0; 1294 1300 1295 1301 ret = eligible_child(wo, p); ··· 1316 1310 return 0; 1317 1311 } 1318 1312 1319 - if (unlikely(p->exit_state == EXIT_TRACE)) { 1313 + if (unlikely(exit_state == EXIT_TRACE)) { 1320 1314 /* 1321 1315 * ptrace == 0 means we are the natural parent. In this case 1322 1316 * we should clear notask_error, debugger will notify us. ··· 1343 1337 } 1344 1338 1345 1339 /* slay zombie? */ 1346 - if (p->exit_state == EXIT_ZOMBIE) { 1340 + if (exit_state == EXIT_ZOMBIE) { 1347 1341 /* we don't reap group leaders with subthreads */ 1348 1342 if (!delay_group_leader(p)) { 1349 1343 /*
+1 -1
kernel/locking/mutex-debug.c
··· 80 80 DEBUG_LOCKS_WARN_ON(lock->owner != current); 81 81 82 82 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); 83 - mutex_clear_owner(lock); 84 83 } 85 84 86 85 /* 87 86 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug 88 87 * mutexes so that we can do it here after we've verified state. 89 88 */ 89 + mutex_clear_owner(lock); 90 90 atomic_set(&lock->count, 1); 91 91 } 92 92
+5 -5
kernel/range.c
··· 113 113 { 114 114 const struct range *r1 = x1; 115 115 const struct range *r2 = x2; 116 - s64 start1, start2; 117 116 118 - start1 = r1->start; 119 - start2 = r2->start; 120 - 121 - return start1 - start2; 117 + if (r1->start < r2->start) 118 + return -1; 119 + if (r1->start > r2->start) 120 + return 1; 121 + return 0; 122 122 } 123 123 124 124 int clean_sort_range(struct range *range, int az)
+6 -9
kernel/sched/core.c
··· 7113 7113 #ifdef CONFIG_RT_GROUP_SCHED 7114 7114 alloc_size += 2 * nr_cpu_ids * sizeof(void **); 7115 7115 #endif 7116 - #ifdef CONFIG_CPUMASK_OFFSTACK 7117 - alloc_size += num_possible_cpus() * cpumask_size(); 7118 - #endif 7119 7116 if (alloc_size) { 7120 7117 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 7121 7118 ··· 7132 7135 ptr += nr_cpu_ids * sizeof(void **); 7133 7136 7134 7137 #endif /* CONFIG_RT_GROUP_SCHED */ 7135 - #ifdef CONFIG_CPUMASK_OFFSTACK 7136 - for_each_possible_cpu(i) { 7137 - per_cpu(load_balance_mask, i) = (void *)ptr; 7138 - ptr += cpumask_size(); 7139 - } 7140 - #endif /* CONFIG_CPUMASK_OFFSTACK */ 7141 7138 } 7139 + #ifdef CONFIG_CPUMASK_OFFSTACK 7140 + for_each_possible_cpu(i) { 7141 + per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( 7142 + cpumask_size(), GFP_KERNEL, cpu_to_node(i)); 7143 + } 7144 + #endif /* CONFIG_CPUMASK_OFFSTACK */ 7142 7145 7143 7146 init_rt_bandwidth(&def_rt_bandwidth, 7144 7147 global_rt_period(), global_rt_runtime());
+4 -21
kernel/sched/deadline.c
··· 570 570 static 571 571 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) 572 572 { 573 - int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); 574 - int rorun = dl_se->runtime <= 0; 575 - 576 - if (!rorun && !dmiss) 577 - return 0; 578 - 579 - /* 580 - * If we are beyond our current deadline and we are still 581 - * executing, then we have already used some of the runtime of 582 - * the next instance. Thus, if we do not account that, we are 583 - * stealing bandwidth from the system at each deadline miss! 584 - */ 585 - if (dmiss) { 586 - dl_se->runtime = rorun ? dl_se->runtime : 0; 587 - dl_se->runtime -= rq_clock(rq) - dl_se->deadline; 588 - } 589 - 590 - return 1; 573 + return (dl_se->runtime <= 0); 591 574 } 592 575 593 576 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); ··· 809 826 * parameters of the task might need updating. Otherwise, 810 827 * we want a replenishment of its runtime. 811 828 */ 812 - if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) 813 - replenish_dl_entity(dl_se, pi_se); 814 - else 829 + if (dl_se->dl_new || flags & ENQUEUE_WAKEUP) 815 830 update_dl_entity(dl_se, pi_se); 831 + else if (flags & ENQUEUE_REPLENISH) 832 + replenish_dl_entity(dl_se, pi_se); 816 833 817 834 __enqueue_dl_entity(dl_se); 818 835 }
+5 -1
kernel/sched/fair.c
··· 4005 4005 4006 4006 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) 4007 4007 { 4008 + /* init_cfs_bandwidth() was not called */ 4009 + if (!cfs_b->throttled_cfs_rq.next) 4010 + return; 4011 + 4008 4012 hrtimer_cancel(&cfs_b->period_timer); 4009 4013 hrtimer_cancel(&cfs_b->slack_timer); 4010 4014 } ··· 4428 4424 * wl = S * s'_i; see (2) 4429 4425 */ 4430 4426 if (W > 0 && w < W) 4431 - wl = (w * tg->shares) / W; 4427 + wl = (w * (long)tg->shares) / W; 4432 4428 else 4433 4429 wl = tg->shares; 4434 4430
+45 -8
kernel/trace/ftrace.c
··· 2497 2497 } 2498 2498 2499 2499 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2500 - struct ftrace_hash *old_hash) 2500 + struct ftrace_ops_hash *old_hash) 2501 2501 { 2502 2502 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2503 - ops->old_hash.filter_hash = old_hash; 2503 + ops->old_hash.filter_hash = old_hash->filter_hash; 2504 + ops->old_hash.notrace_hash = old_hash->notrace_hash; 2504 2505 ftrace_run_update_code(command); 2505 2506 ops->old_hash.filter_hash = NULL; 2507 + ops->old_hash.notrace_hash = NULL; 2506 2508 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2507 2509 } 2508 2510 ··· 3581 3579 3582 3580 static int ftrace_probe_registered; 3583 3581 3584 - static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash) 3582 + static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) 3585 3583 { 3586 3584 int ret; 3587 3585 int i; ··· 3639 3637 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3640 3638 void *data) 3641 3639 { 3640 + struct ftrace_ops_hash old_hash_ops; 3642 3641 struct ftrace_func_probe *entry; 3643 3642 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3644 3643 struct ftrace_hash *old_hash = *orig_hash; ··· 3660 3657 return -EINVAL; 3661 3658 3662 3659 mutex_lock(&trace_probe_ops.func_hash->regex_lock); 3660 + 3661 + old_hash_ops.filter_hash = old_hash; 3662 + /* Probes only have filters */ 3663 + old_hash_ops.notrace_hash = NULL; 3663 3664 3664 3665 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 3665 3666 if (!hash) { ··· 3725 3718 3726 3719 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3727 3720 3728 - __enable_ftrace_function_probe(old_hash); 3721 + __enable_ftrace_function_probe(&old_hash_ops); 3729 3722 3730 3723 if (!ret) 3731 3724 free_ftrace_hash_rcu(old_hash); ··· 4013 4006 } 4014 4007 4015 4008 static void ftrace_ops_update_code(struct ftrace_ops *ops, 4016 - struct ftrace_hash *old_hash) 4009 + 
struct ftrace_ops_hash *old_hash) 4017 4010 { 4018 - if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) 4011 + struct ftrace_ops *op; 4012 + 4013 + if (!ftrace_enabled) 4014 + return; 4015 + 4016 + if (ops->flags & FTRACE_OPS_FL_ENABLED) { 4019 4017 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 4018 + return; 4019 + } 4020 + 4021 + /* 4022 + * If this is the shared global_ops filter, then we need to 4023 + * check if there is another ops that shares it, is enabled. 4024 + * If so, we still need to run the modify code. 4025 + */ 4026 + if (ops->func_hash != &global_ops.local_hash) 4027 + return; 4028 + 4029 + do_for_each_ftrace_op(op, ftrace_ops_list) { 4030 + if (op->func_hash == &global_ops.local_hash && 4031 + op->flags & FTRACE_OPS_FL_ENABLED) { 4032 + ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 4033 + /* Only need to do this once */ 4034 + return; 4035 + } 4036 + } while_for_each_ftrace_op(op); 4020 4037 } 4021 4038 4022 4039 static int ··· 4048 4017 unsigned long ip, int remove, int reset, int enable) 4049 4018 { 4050 4019 struct ftrace_hash **orig_hash; 4020 + struct ftrace_ops_hash old_hash_ops; 4051 4021 struct ftrace_hash *old_hash; 4052 4022 struct ftrace_hash *hash; 4053 4023 int ret; ··· 4085 4053 4086 4054 mutex_lock(&ftrace_lock); 4087 4055 old_hash = *orig_hash; 4056 + old_hash_ops.filter_hash = ops->func_hash->filter_hash; 4057 + old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 4088 4058 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 4089 4059 if (!ret) { 4090 - ftrace_ops_update_code(ops, old_hash); 4060 + ftrace_ops_update_code(ops, &old_hash_ops); 4091 4061 free_ftrace_hash_rcu(old_hash); 4092 4062 } 4093 4063 mutex_unlock(&ftrace_lock); ··· 4301 4267 int ftrace_regex_release(struct inode *inode, struct file *file) 4302 4268 { 4303 4269 struct seq_file *m = (struct seq_file *)file->private_data; 4270 + struct ftrace_ops_hash old_hash_ops; 4304 4271 struct ftrace_iterator *iter; 4305 4272 struct 
ftrace_hash **orig_hash; 4306 4273 struct ftrace_hash *old_hash; ··· 4335 4300 4336 4301 mutex_lock(&ftrace_lock); 4337 4302 old_hash = *orig_hash; 4303 + old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash; 4304 + old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash; 4338 4305 ret = ftrace_hash_move(iter->ops, filter_hash, 4339 4306 orig_hash, iter->hash); 4340 4307 if (!ret) { 4341 - ftrace_ops_update_code(iter->ops, old_hash); 4308 + ftrace_ops_update_code(iter->ops, &old_hash_ops); 4342 4309 free_ftrace_hash_rcu(old_hash); 4343 4310 } 4344 4311 mutex_unlock(&ftrace_lock);
-1
kernel/trace/trace.c
··· 6918 6918 tracepoint_printk = 0; 6919 6919 } 6920 6920 tracer_alloc_buffers(); 6921 - init_ftrace_syscalls(); 6922 6921 trace_event_init(); 6923 6922 } 6924 6923
+55 -14
kernel/trace/trace_events.c
··· 2429 2429 return 0; 2430 2430 } 2431 2431 2432 + static __init void 2433 + early_enable_events(struct trace_array *tr, bool disable_first) 2434 + { 2435 + char *buf = bootup_event_buf; 2436 + char *token; 2437 + int ret; 2438 + 2439 + while (true) { 2440 + token = strsep(&buf, ","); 2441 + 2442 + if (!token) 2443 + break; 2444 + if (!*token) 2445 + continue; 2446 + 2447 + /* Restarting syscalls requires that we stop them first */ 2448 + if (disable_first) 2449 + ftrace_set_clr_event(tr, token, 0); 2450 + 2451 + ret = ftrace_set_clr_event(tr, token, 1); 2452 + if (ret) 2453 + pr_warn("Failed to enable trace event: %s\n", token); 2454 + 2455 + /* Put back the comma to allow this to be called again */ 2456 + if (buf) 2457 + *(buf - 1) = ','; 2458 + } 2459 + } 2460 + 2432 2461 static __init int event_trace_enable(void) 2433 2462 { 2434 2463 struct trace_array *tr = top_trace_array(); 2435 2464 struct ftrace_event_call **iter, *call; 2436 - char *buf = bootup_event_buf; 2437 - char *token; 2438 2465 int ret; 2439 2466 2440 2467 if (!tr) ··· 2483 2456 */ 2484 2457 __trace_early_add_events(tr); 2485 2458 2486 - while (true) { 2487 - token = strsep(&buf, ","); 2488 - 2489 - if (!token) 2490 - break; 2491 - if (!*token) 2492 - continue; 2493 - 2494 - ret = ftrace_set_clr_event(tr, token, 1); 2495 - if (ret) 2496 - pr_warn("Failed to enable trace event: %s\n", token); 2497 - } 2459 + early_enable_events(tr, false); 2498 2460 2499 2461 trace_printk_start_comm(); 2500 2462 ··· 2493 2477 2494 2478 return 0; 2495 2479 } 2480 + 2481 + /* 2482 + * event_trace_enable() is called from trace_event_init() first to 2483 + * initialize events and perhaps start any events that are on the 2484 + * command line. Unfortunately, there are some events that will not 2485 + * start this early, like the system call tracepoints that need 2486 + * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. 
But event_trace_enable() 2487 + * is called before pid 1 starts, and this flag is never set, making 2488 + * the syscall tracepoint never get reached, but the event is enabled 2489 + * regardless (and not doing anything). 2490 + */ 2491 + static __init int event_trace_enable_again(void) 2492 + { 2493 + struct trace_array *tr; 2494 + 2495 + tr = top_trace_array(); 2496 + if (!tr) 2497 + return -ENODEV; 2498 + 2499 + early_enable_events(tr, true); 2500 + 2501 + return 0; 2502 + } 2503 + 2504 + early_initcall(event_trace_enable_again); 2496 2505 2497 2506 static __init int event_trace_init(void) 2498 2507 {
+2 -2
kernel/trace/trace_kdb.c
··· 132 132 133 133 static __init int kdb_ftrace_register(void) 134 134 { 135 - kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", 136 - "Dump ftrace log", 0, KDB_REPEAT_NONE); 135 + kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", 136 + "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); 137 137 return 0; 138 138 } 139 139
+25
lib/Kconfig.kgdb
··· 73 73 help 74 74 KDB frontend for kernel 75 75 76 + config KDB_DEFAULT_ENABLE 77 + hex "KDB: Select kdb command functions to be enabled by default" 78 + depends on KGDB_KDB 79 + default 0x1 80 + help 81 + Specifiers which kdb commands are enabled by default. This may 82 + be set to 1 or 0 to enable all commands or disable almost all 83 + commands. 84 + 85 + Alternatively the following bitmask applies: 86 + 87 + 0x0002 - allow arbitrary reads from memory and symbol lookup 88 + 0x0004 - allow arbitrary writes to memory 89 + 0x0008 - allow current register state to be inspected 90 + 0x0010 - allow current register state to be modified 91 + 0x0020 - allow passive inspection (backtrace, process list, lsmod) 92 + 0x0040 - allow flow control management (breakpoint, single step) 93 + 0x0080 - enable signalling of processes 94 + 0x0100 - allow machine to be rebooted 95 + 96 + The config option merely sets the default at boot time. Both 97 + issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or 98 + setting with kdb.cmd_enable=X kernel command line option will 99 + override the default settings. 100 + 76 101 config KDB_KEYBOARD 77 102 bool "KGDB_KDB: keyboard as input device" 78 103 depends on VT && KGDB_KDB
+1
lib/assoc_array.c
··· 11 11 * 2 of the Licence, or (at your option) any later version. 12 12 */ 13 13 //#define DEBUG 14 + #include <linux/rcupdate.h> 14 15 #include <linux/slab.h> 15 16 #include <linux/err.h> 16 17 #include <linux/assoc_array_priv.h>
-9
mm/Kconfig.debug
··· 14 14 depends on !KMEMCHECK 15 15 select PAGE_EXTENSION 16 16 select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC 17 - select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC 18 17 ---help--- 19 18 Unmap pages from the kernel linear mapping after free_pages(). 20 19 This results in a large slowdown, but helps to find certain types ··· 26 27 that would result in incorrect warnings of memory corruption after 27 28 a resume because free pages are not saved to the suspend image. 28 29 29 - config WANT_PAGE_DEBUG_FLAGS 30 - bool 31 - 32 30 config PAGE_POISONING 33 31 bool 34 - select WANT_PAGE_DEBUG_FLAGS 35 - 36 - config PAGE_GUARD 37 - bool 38 - select WANT_PAGE_DEBUG_FLAGS
+12 -17
mm/filemap.c
··· 1046 1046 * @mapping: the address_space to search 1047 1047 * @offset: the page index 1048 1048 * @fgp_flags: PCG flags 1049 - * @cache_gfp_mask: gfp mask to use for the page cache data page allocation 1050 - * @radix_gfp_mask: gfp mask to use for radix tree node allocation 1049 + * @gfp_mask: gfp mask to use for the page cache data page allocation 1051 1050 * 1052 1051 * Looks up the page cache slot at @mapping & @offset. 1053 1052 * ··· 1055 1056 * FGP_ACCESSED: the page will be marked accessed 1056 1057 * FGP_LOCK: Page is return locked 1057 1058 * FGP_CREAT: If page is not present then a new page is allocated using 1058 - * @cache_gfp_mask and added to the page cache and the VM's LRU 1059 - * list. If radix tree nodes are allocated during page cache 1060 - * insertion then @radix_gfp_mask is used. The page is returned 1061 - * locked and with an increased refcount. Otherwise, %NULL is 1062 - * returned. 1059 + * @gfp_mask and added to the page cache and the VM's LRU 1060 + * list. The page is returned locked and with an increased 1061 + * refcount. Otherwise, %NULL is returned. 1063 1062 * 1064 1063 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even 1065 1064 * if the GFP flags specified for FGP_CREAT are atomic. ··· 1065 1068 * If there is a page cache page, it is returned with an increased refcount. 
1066 1069 */ 1067 1070 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 1068 - int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask) 1071 + int fgp_flags, gfp_t gfp_mask) 1069 1072 { 1070 1073 struct page *page; 1071 1074 ··· 1102 1105 if (!page && (fgp_flags & FGP_CREAT)) { 1103 1106 int err; 1104 1107 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) 1105 - cache_gfp_mask |= __GFP_WRITE; 1106 - if (fgp_flags & FGP_NOFS) { 1107 - cache_gfp_mask &= ~__GFP_FS; 1108 - radix_gfp_mask &= ~__GFP_FS; 1109 - } 1108 + gfp_mask |= __GFP_WRITE; 1109 + if (fgp_flags & FGP_NOFS) 1110 + gfp_mask &= ~__GFP_FS; 1110 1111 1111 - page = __page_cache_alloc(cache_gfp_mask); 1112 + page = __page_cache_alloc(gfp_mask); 1112 1113 if (!page) 1113 1114 return NULL; 1114 1115 ··· 1117 1122 if (fgp_flags & FGP_ACCESSED) 1118 1123 __SetPageReferenced(page); 1119 1124 1120 - err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask); 1125 + err = add_to_page_cache_lru(page, mapping, offset, 1126 + gfp_mask & GFP_RECLAIM_MASK); 1121 1127 if (unlikely(err)) { 1122 1128 page_cache_release(page); 1123 1129 page = NULL; ··· 2439 2443 fgp_flags |= FGP_NOFS; 2440 2444 2441 2445 page = pagecache_get_page(mapping, index, fgp_flags, 2442 - mapping_gfp_mask(mapping), 2443 - GFP_KERNEL); 2446 + mapping_gfp_mask(mapping)); 2444 2447 if (page) 2445 2448 wait_for_stable_page(page); 2446 2449
+4 -13
mm/memcontrol.c
··· 3043 3043 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { 3044 3044 mem_cgroup_swap_statistics(from, false); 3045 3045 mem_cgroup_swap_statistics(to, true); 3046 - /* 3047 - * This function is only called from task migration context now. 3048 - * It postpones page_counter and refcount handling till the end 3049 - * of task migration(mem_cgroup_clear_mc()) for performance 3050 - * improvement. But we cannot postpone css_get(to) because if 3051 - * the process that has been moved to @to does swap-in, the 3052 - * refcount of @to might be decreased to 0. 3053 - * 3054 - * We are in attach() phase, so the cgroup is guaranteed to be 3055 - * alive, so we can just call css_get(). 3056 - */ 3057 - css_get(&to->css); 3058 3046 return 0; 3059 3047 } 3060 3048 return -EINVAL; ··· 4667 4679 if (parent_css == NULL) { 4668 4680 root_mem_cgroup = memcg; 4669 4681 page_counter_init(&memcg->memory, NULL); 4682 + memcg->soft_limit = PAGE_COUNTER_MAX; 4670 4683 page_counter_init(&memcg->memsw, NULL); 4671 4684 page_counter_init(&memcg->kmem, NULL); 4672 4685 } ··· 4713 4724 4714 4725 if (parent->use_hierarchy) { 4715 4726 page_counter_init(&memcg->memory, &parent->memory); 4727 + memcg->soft_limit = PAGE_COUNTER_MAX; 4716 4728 page_counter_init(&memcg->memsw, &parent->memsw); 4717 4729 page_counter_init(&memcg->kmem, &parent->kmem); 4718 4730 ··· 4723 4733 */ 4724 4734 } else { 4725 4735 page_counter_init(&memcg->memory, NULL); 4736 + memcg->soft_limit = PAGE_COUNTER_MAX; 4726 4737 page_counter_init(&memcg->memsw, NULL); 4727 4738 page_counter_init(&memcg->kmem, NULL); 4728 4739 /* ··· 4798 4807 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); 4799 4808 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); 4800 4809 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); 4801 - memcg->soft_limit = 0; 4810 + memcg->soft_limit = PAGE_COUNTER_MAX; 4802 4811 } 4803 4812 4804 4813 #ifdef CONFIG_MMU
+23 -16
mm/memory.c
··· 235 235 236 236 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) 237 237 { 238 + if (!tlb->end) 239 + return; 240 + 238 241 tlb_flush(tlb); 239 242 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); 240 243 #ifdef CONFIG_HAVE_RCU_TABLE_FREE ··· 250 247 { 251 248 struct mmu_gather_batch *batch; 252 249 253 - for (batch = &tlb->local; batch; batch = batch->next) { 250 + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { 254 251 free_pages_and_swap_cache(batch->pages, batch->nr); 255 252 batch->nr = 0; 256 253 } ··· 259 256 260 257 void tlb_flush_mmu(struct mmu_gather *tlb) 261 258 { 262 - if (!tlb->end) 263 - return; 264 - 265 259 tlb_flush_mmu_tlbonly(tlb); 266 260 tlb_flush_mmu_free(tlb); 267 261 } ··· 2137 2137 if (!dirty_page) 2138 2138 return ret; 2139 2139 2140 - /* 2141 - * Yes, Virginia, this is actually required to prevent a race 2142 - * with clear_page_dirty_for_io() from clearing the page dirty 2143 - * bit after it clear all dirty ptes, but before a racing 2144 - * do_wp_page installs a dirty pte. 2145 - * 2146 - * do_shared_fault is protected similarly. 2147 - */ 2148 2140 if (!page_mkwrite) { 2149 - wait_on_page_locked(dirty_page); 2150 - set_page_dirty_balance(dirty_page); 2141 + struct address_space *mapping; 2142 + int dirtied; 2143 + 2144 + lock_page(dirty_page); 2145 + dirtied = set_page_dirty(dirty_page); 2146 + VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page); 2147 + mapping = dirty_page->mapping; 2148 + unlock_page(dirty_page); 2149 + 2150 + if (dirtied && mapping) { 2151 + /* 2152 + * Some device drivers do not set page.mapping 2153 + * but still dirty their pages 2154 + */ 2155 + balance_dirty_pages_ratelimited(mapping); 2156 + } 2157 + 2151 2158 /* file_update_time outside page_lock */ 2152 2159 if (vma->vm_file) 2153 2160 file_update_time(vma->vm_file); ··· 2600 2593 if (prev && prev->vm_end == address) 2601 2594 return prev->vm_flags & VM_GROWSDOWN ? 
0 : -ENOMEM; 2602 2595 2603 - expand_downwards(vma, address - PAGE_SIZE); 2596 + return expand_downwards(vma, address - PAGE_SIZE); 2604 2597 } 2605 2598 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { 2606 2599 struct vm_area_struct *next = vma->vm_next; ··· 2609 2602 if (next && next->vm_start == address + PAGE_SIZE) 2610 2603 return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; 2611 2604 2612 - expand_upwards(vma, address + PAGE_SIZE); 2605 + return expand_upwards(vma, address + PAGE_SIZE); 2613 2606 } 2614 2607 return 0; 2615 2608 }
+10 -5
mm/mmap.c
··· 778 778 if (exporter && exporter->anon_vma && !importer->anon_vma) { 779 779 int error; 780 780 781 - error = anon_vma_clone(importer, exporter); 782 - if (error) 783 - return error; 784 781 importer->anon_vma = exporter->anon_vma; 782 + error = anon_vma_clone(importer, exporter); 783 + if (error) { 784 + importer->anon_vma = NULL; 785 + return error; 786 + } 785 787 } 786 788 } 787 789 ··· 2101 2099 { 2102 2100 struct mm_struct *mm = vma->vm_mm; 2103 2101 struct rlimit *rlim = current->signal->rlim; 2104 - unsigned long new_start; 2102 + unsigned long new_start, actual_size; 2105 2103 2106 2104 /* address space limit tests */ 2107 2105 if (!may_expand_vm(mm, grow)) 2108 2106 return -ENOMEM; 2109 2107 2110 2108 /* Stack limit test */ 2111 - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2109 + actual_size = size; 2110 + if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) 2111 + actual_size -= PAGE_SIZE; 2112 + if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2112 2113 return -ENOMEM; 2113 2114 2114 2115 /* mlock limit tests */
+12 -31
mm/page-writeback.c
··· 1541 1541 bdi_start_background_writeback(bdi); 1542 1542 } 1543 1543 1544 - void set_page_dirty_balance(struct page *page) 1545 - { 1546 - if (set_page_dirty(page)) { 1547 - struct address_space *mapping = page_mapping(page); 1548 - 1549 - if (mapping) 1550 - balance_dirty_pages_ratelimited(mapping); 1551 - } 1552 - } 1553 - 1554 1544 static DEFINE_PER_CPU(int, bdp_ratelimits); 1555 1545 1556 1546 /* ··· 2113 2123 * page dirty in that case, but not all the buffers. This is a "bottom-up" 2114 2124 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. 2115 2125 * 2116 - * Most callers have locked the page, which pins the address_space in memory. 2117 - * But zap_pte_range() does not lock the page, however in that case the 2118 - * mapping is pinned by the vma's ->vm_file reference. 2119 - * 2120 - * We take care to handle the case where the page was truncated from the 2121 - * mapping by re-checking page_mapping() inside tree_lock. 2126 + * The caller must ensure this doesn't race with truncation. Most will simply 2127 + * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and 2128 + * the pte lock held, which also locks out truncation. 2122 2129 */ 2123 2130 int __set_page_dirty_nobuffers(struct page *page) 2124 2131 { 2125 2132 if (!TestSetPageDirty(page)) { 2126 2133 struct address_space *mapping = page_mapping(page); 2127 - struct address_space *mapping2; 2128 2134 unsigned long flags; 2129 2135 2130 2136 if (!mapping) 2131 2137 return 1; 2132 2138 2133 2139 spin_lock_irqsave(&mapping->tree_lock, flags); 2134 - mapping2 = page_mapping(page); 2135 - if (mapping2) { /* Race with truncate? 
*/ 2136 - BUG_ON(mapping2 != mapping); 2137 - WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); 2138 - account_page_dirtied(page, mapping); 2139 - radix_tree_tag_set(&mapping->page_tree, 2140 - page_index(page), PAGECACHE_TAG_DIRTY); 2141 - } 2140 + BUG_ON(page_mapping(page) != mapping); 2141 + WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); 2142 + account_page_dirtied(page, mapping); 2143 + radix_tree_tag_set(&mapping->page_tree, page_index(page), 2144 + PAGECACHE_TAG_DIRTY); 2142 2145 spin_unlock_irqrestore(&mapping->tree_lock, flags); 2143 2146 if (mapping->host) { 2144 2147 /* !PageAnon && !swapper_space */ ··· 2288 2305 /* 2289 2306 * We carefully synchronise fault handlers against 2290 2307 * installing a dirty pte and marking the page dirty 2291 - * at this point. We do this by having them hold the 2292 - * page lock at some point after installing their 2293 - * pte, but before marking the page dirty. 2294 - * Pages are always locked coming in here, so we get 2295 - * the desired exclusion. See mm/memory.c:do_wp_page() 2296 - * for more comments. 2308 + * at this point. We do this by having them hold the 2309 + * page lock while dirtying the page, and pages are 2310 + * always locked coming in here, so we get the desired 2311 + * exclusion. 2297 2312 */ 2298 2313 if (TestClearPageDirty(page)) { 2299 2314 dec_zone_page_state(page, NR_FILE_DIRTY);
+41 -1
mm/rmap.c
··· 72 72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 73 73 if (anon_vma) { 74 74 atomic_set(&anon_vma->refcount, 1); 75 + anon_vma->degree = 1; /* Reference for first vma */ 76 + anon_vma->parent = anon_vma; 75 77 /* 76 78 * Initialise the anon_vma root to point to itself. If called 77 79 * from fork, the root will be reset to the parents anon_vma. ··· 190 188 if (likely(!vma->anon_vma)) { 191 189 vma->anon_vma = anon_vma; 192 190 anon_vma_chain_link(vma, avc, anon_vma); 191 + /* vma reference or self-parent link for new root */ 192 + anon_vma->degree++; 193 193 allocated = NULL; 194 194 avc = NULL; 195 195 } ··· 240 236 /* 241 237 * Attach the anon_vmas from src to dst. 242 238 * Returns 0 on success, -ENOMEM on failure. 239 + * 240 + * If dst->anon_vma is NULL this function tries to find and reuse existing 241 + * anon_vma which has no vmas and only one child anon_vma. This prevents 242 + * degradation of anon_vma hierarchy to endless linear chain in case of 243 + * constantly forking task. On the other hand, an anon_vma with more than one 244 + * child isn't reused even if there was no alive vma, thus rmap walker has a 245 + * good chance of avoiding scanning the whole hierarchy when it searches where 246 + * page is mapped. 243 247 */ 244 248 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 245 249 { ··· 268 256 anon_vma = pavc->anon_vma; 269 257 root = lock_anon_vma_root(root, anon_vma); 270 258 anon_vma_chain_link(dst, avc, anon_vma); 259 + 260 + /* 261 + * Reuse existing anon_vma if its degree lower than two, 262 + * that means it has no vma and only one anon_vma child. 263 + * 264 + * Do not chose parent anon_vma, otherwise first child 265 + * will always reuse it. Root anon_vma is never reused: 266 + * it has self-parent reference and at least one child. 
267 + */ 268 + if (!dst->anon_vma && anon_vma != src->anon_vma && 269 + anon_vma->degree < 2) 270 + dst->anon_vma = anon_vma; 271 271 } 272 + if (dst->anon_vma) 273 + dst->anon_vma->degree++; 272 274 unlock_anon_vma_root(root); 273 275 return 0; 274 276 ··· 306 280 if (!pvma->anon_vma) 307 281 return 0; 308 282 283 + /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 284 + vma->anon_vma = NULL; 285 + 309 286 /* 310 287 * First, attach the new VMA to the parent VMA's anon_vmas, 311 288 * so rmap can find non-COWed pages in child processes. ··· 316 287 error = anon_vma_clone(vma, pvma); 317 288 if (error) 318 289 return error; 290 + 291 + /* An existing anon_vma has been reused, all done then. */ 292 + if (vma->anon_vma) 293 + return 0; 319 294 320 295 /* Then add our own anon_vma. */ 321 296 anon_vma = anon_vma_alloc(); ··· 334 301 * lock any of the anon_vmas in this anon_vma tree. 335 302 */ 336 303 anon_vma->root = pvma->anon_vma->root; 304 + anon_vma->parent = pvma->anon_vma; 337 305 /* 338 306 * With refcounts, an anon_vma can stay around longer than the 339 307 * process it belongs to. The root anon_vma needs to be pinned until ··· 345 311 vma->anon_vma = anon_vma; 346 312 anon_vma_lock_write(anon_vma); 347 313 anon_vma_chain_link(vma, avc, anon_vma); 314 + anon_vma->parent->degree++; 348 315 anon_vma_unlock_write(anon_vma); 349 316 350 317 return 0; ··· 376 341 * Leave empty anon_vmas on the list - we'll need 377 342 * to free them outside the lock. 
378 343 */ 379 - if (RB_EMPTY_ROOT(&anon_vma->rb_root)) 344 + if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { 345 + anon_vma->parent->degree--; 380 346 continue; 347 + } 381 348 382 349 list_del(&avc->same_vma); 383 350 anon_vma_chain_free(avc); 384 351 } 352 + if (vma->anon_vma) 353 + vma->anon_vma->degree--; 385 354 unlock_anon_vma_root(root); 386 355 387 356 /* ··· 396 357 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 397 358 struct anon_vma *anon_vma = avc->anon_vma; 398 359 360 + BUG_ON(anon_vma->degree); 399 361 put_anon_vma(anon_vma); 400 362 401 363 list_del(&avc->same_vma);
+13 -11
mm/vmscan.c
··· 2921 2921 return false; 2922 2922 2923 2923 /* 2924 - * There is a potential race between when kswapd checks its watermarks 2925 - * and a process gets throttled. There is also a potential race if 2926 - * processes get throttled, kswapd wakes, a large process exits therby 2927 - * balancing the zones that causes kswapd to miss a wakeup. If kswapd 2928 - * is going to sleep, no process should be sleeping on pfmemalloc_wait 2929 - * so wake them now if necessary. If necessary, processes will wake 2930 - * kswapd and get throttled again 2924 + * The throttled processes are normally woken up in balance_pgdat() as 2925 + * soon as pfmemalloc_watermark_ok() is true. But there is a potential 2926 + * race between when kswapd checks the watermarks and a process gets 2927 + * throttled. There is also a potential race if processes get 2928 + * throttled, kswapd wakes, a large process exits thereby balancing the 2929 + * zones, which causes kswapd to exit balance_pgdat() before reaching 2930 + * the wake up checks. If kswapd is going to sleep, no process should 2931 + * be sleeping on pfmemalloc_wait, so wake them now if necessary. If 2932 + * the wake up is premature, processes will wake kswapd and get 2933 + * throttled again. The difference from wake ups in balance_pgdat() is 2934 + * that here we are under prepare_to_wait(). 2931 2935 */ 2932 - if (waitqueue_active(&pgdat->pfmemalloc_wait)) { 2933 - wake_up(&pgdat->pfmemalloc_wait); 2934 - return false; 2935 - } 2936 + if (waitqueue_active(&pgdat->pfmemalloc_wait)) 2937 + wake_up_all(&pgdat->pfmemalloc_wait); 2936 2938 2937 2939 return pgdat_balanced(pgdat, order, classzone_idx); 2938 2940 }
+2 -2
net/batman-adv/fragmentation.c
··· 251 251 kfree(entry); 252 252 253 253 /* Make room for the rest of the fragments. */ 254 - if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) { 254 + if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { 255 255 kfree_skb(skb_out); 256 256 skb_out = NULL; 257 257 goto free; ··· 434 434 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE 435 435 */ 436 436 mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE); 437 - max_fragment_size = (mtu - header_size - ETH_HLEN); 437 + max_fragment_size = mtu - header_size; 438 438 max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; 439 439 440 440 /* Don't even try to fragment, if we need more than 16 fragments */
+1 -1
net/batman-adv/gateway_client.c
··· 810 810 goto out; 811 811 812 812 gw_node = batadv_gw_node_get(bat_priv, orig_dst_node); 813 - if (!gw_node->bandwidth_down == 0) 813 + if (!gw_node) 814 814 goto out; 815 815 816 816 switch (atomic_read(&bat_priv->gw_mode)) {
+7 -4
net/batman-adv/multicast.c
··· 685 685 if (orig_initialized) 686 686 atomic_dec(&bat_priv->mcast.num_disabled); 687 687 orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; 688 - /* If mcast support is being switched off increase the disabled 689 - * mcast node counter. 688 + /* If mcast support is being switched off or if this is an initial 689 + * OGM without mcast support then increase the disabled mcast 690 + * node counter. 690 691 */ 691 692 } else if (!orig_mcast_enabled && 692 - orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) { 693 + (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST || 694 + !orig_initialized)) { 693 695 atomic_inc(&bat_priv->mcast.num_disabled); 694 696 orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; 695 697 } ··· 740 738 { 741 739 struct batadv_priv *bat_priv = orig->bat_priv; 742 740 743 - if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) 741 + if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) && 742 + orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST) 744 743 atomic_dec(&bat_priv->mcast.num_disabled); 745 744 746 745 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
+1 -1
net/batman-adv/network-coding.c
··· 133 133 if (!bat_priv->nc.decoding_hash) 134 134 goto err; 135 135 136 - batadv_hash_set_lock_class(bat_priv->nc.coding_hash, 136 + batadv_hash_set_lock_class(bat_priv->nc.decoding_hash, 137 137 &batadv_nc_decoding_hash_lock_class_key); 138 138 139 139 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
+4 -3
net/batman-adv/originator.c
··· 570 570 571 571 batadv_frag_purge_orig(orig_node, NULL); 572 572 573 - batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1, 574 - "originator timed out"); 575 - 576 573 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) 577 574 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); 578 575 ··· 675 678 atomic_set(&orig_node->last_ttvn, 0); 676 679 orig_node->tt_buff = NULL; 677 680 orig_node->tt_buff_len = 0; 681 + orig_node->last_seen = jiffies; 678 682 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); 679 683 orig_node->bcast_seqno_reset = reset_time; 680 684 #ifdef CONFIG_BATMAN_ADV_MCAST ··· 975 977 if (batadv_purge_orig_node(bat_priv, orig_node)) { 976 978 batadv_gw_node_delete(bat_priv, orig_node); 977 979 hlist_del_rcu(&orig_node->hash_entry); 980 + batadv_tt_global_del_orig(orig_node->bat_priv, 981 + orig_node, -1, 982 + "originator timed out"); 978 983 batadv_orig_node_free_ref(orig_node); 979 984 continue; 980 985 }
+4 -2
net/batman-adv/routing.c
··· 443 443 444 444 router = batadv_orig_router_get(orig_node, recv_if); 445 445 446 + if (!router) 447 + return router; 448 + 446 449 /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop) 447 450 * and if activated. 448 451 */ 449 - if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) || 450 - !router) 452 + if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding))) 451 453 return router; 452 454 453 455 /* bonding: loop through the list of possible routers found
-1
net/bluetooth/6lowpan.c
··· 390 390 391 391 drop: 392 392 dev->stats.rx_dropped++; 393 - kfree_skb(skb); 394 393 return NET_RX_DROP; 395 394 } 396 395
+3
net/bluetooth/bnep/core.c
··· 533 533 534 534 BT_DBG(""); 535 535 536 + if (!l2cap_is_socket(sock)) 537 + return -EBADFD; 538 + 536 539 baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); 537 540 baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); 538 541
+3
net/bluetooth/cmtp/core.c
··· 334 334 335 335 BT_DBG(""); 336 336 337 + if (!l2cap_is_socket(sock)) 338 + return -EBADFD; 339 + 337 340 session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); 338 341 if (!session) 339 342 return -ENOMEM;
+12 -4
net/bluetooth/hci_event.c
··· 242 242 if (rp->status) 243 243 return; 244 244 245 - if (test_bit(HCI_SETUP, &hdev->dev_flags)) 245 + if (test_bit(HCI_SETUP, &hdev->dev_flags) || 246 + test_bit(HCI_CONFIG, &hdev->dev_flags)) 246 247 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 247 248 } 248 249 ··· 510 509 if (rp->status) 511 510 return; 512 511 513 - if (test_bit(HCI_SETUP, &hdev->dev_flags)) { 512 + if (test_bit(HCI_SETUP, &hdev->dev_flags) || 513 + test_bit(HCI_CONFIG, &hdev->dev_flags)) { 514 514 hdev->hci_ver = rp->hci_ver; 515 515 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 516 516 hdev->lmp_ver = rp->lmp_ver; ··· 530 528 if (rp->status) 531 529 return; 532 530 533 - if (test_bit(HCI_SETUP, &hdev->dev_flags)) 531 + if (test_bit(HCI_SETUP, &hdev->dev_flags) || 532 + test_bit(HCI_CONFIG, &hdev->dev_flags)) 534 533 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 535 534 } 536 535 ··· 2197 2194 return; 2198 2195 } 2199 2196 2200 - if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && 2197 + /* Require HCI_CONNECTABLE or a whitelist entry to accept the 2198 + * connection. These features are only touched through mgmt so 2199 + * only do the checks if HCI_MGMT is set. 2200 + */ 2201 + if (test_bit(HCI_MGMT, &hdev->dev_flags) && 2202 + !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && 2201 2203 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, 2202 2204 BDADDR_BREDR)) { 2203 2205 hci_reject_conn(hdev, &ev->bdaddr);
+2 -1
net/bluetooth/hidp/core.c
··· 1314 1314 { 1315 1315 struct hidp_session *session; 1316 1316 struct l2cap_conn *conn; 1317 - struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan; 1317 + struct l2cap_chan *chan; 1318 1318 int ret; 1319 1319 1320 1320 ret = hidp_verify_sockets(ctrl_sock, intr_sock); 1321 1321 if (ret) 1322 1322 return ret; 1323 1323 1324 + chan = l2cap_pi(ctrl_sock->sk)->chan; 1324 1325 conn = NULL; 1325 1326 l2cap_chan_lock(chan); 1326 1327 if (chan->conn)
+2 -1
net/bridge/br_input.c
··· 154 154 dst = NULL; 155 155 156 156 if (is_broadcast_ether_addr(dest)) { 157 - if (p->flags & BR_PROXYARP && 157 + if (IS_ENABLED(CONFIG_INET) && 158 + p->flags & BR_PROXYARP && 158 159 skb->protocol == htons(ETH_P_ARP)) 159 160 br_do_proxy_arp(skb, br, vid); 160 161
+1 -1
net/ceph/auth_x.c
··· 676 676 int ret; 677 677 char tmp_enc[40]; 678 678 __le32 tmp[5] = { 679 - 16u, msg->hdr.crc, msg->footer.front_crc, 679 + cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc, 680 680 msg->footer.middle_crc, msg->footer.data_crc, 681 681 }; 682 682 ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
+1 -1
net/ceph/mon_client.c
··· 717 717 if (src_len != sizeof(u32) + dst_len) 718 718 return -EINVAL; 719 719 720 - buf_len = le32_to_cpu(*(u32 *)src); 720 + buf_len = le32_to_cpu(*(__le32 *)src); 721 721 if (buf_len != dst_len) 722 722 return -EINVAL; 723 723
+100 -75
net/core/dev.c
··· 1694 1694 1695 1695 skb_scrub_packet(skb, true); 1696 1696 skb->protocol = eth_type_trans(skb, dev); 1697 + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1697 1698 1698 1699 return 0; 1699 1700 } ··· 2523 2522 /* If MPLS offload request, verify we are testing hardware MPLS features 2524 2523 * instead of standard features for the netdev. 2525 2524 */ 2526 - #ifdef CONFIG_NET_MPLS_GSO 2525 + #if IS_ENABLED(CONFIG_NET_MPLS_GSO) 2527 2526 static netdev_features_t net_mpls_features(struct sk_buff *skb, 2528 2527 netdev_features_t features, 2529 2528 __be16 type) ··· 2563 2562 2564 2563 netdev_features_t netif_skb_features(struct sk_buff *skb) 2565 2564 { 2566 - const struct net_device *dev = skb->dev; 2565 + struct net_device *dev = skb->dev; 2567 2566 netdev_features_t features = dev->features; 2568 2567 u16 gso_segs = skb_shinfo(skb)->gso_segs; 2569 2568 __be16 protocol = skb->protocol; ··· 2571 2570 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs) 2572 2571 features &= ~NETIF_F_GSO_MASK; 2573 2572 2574 - if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { 2575 - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2576 - protocol = veh->h_vlan_encapsulated_proto; 2577 - } else if (!vlan_tx_tag_present(skb)) { 2578 - return harmonize_features(skb, features); 2573 + /* If encapsulation offload request, verify we are testing 2574 + * hardware encapsulation features instead of standard 2575 + * features for the netdev 2576 + */ 2577 + if (skb->encapsulation) 2578 + features &= dev->hw_enc_features; 2579 + 2580 + if (!vlan_tx_tag_present(skb)) { 2581 + if (unlikely(protocol == htons(ETH_P_8021Q) || 2582 + protocol == htons(ETH_P_8021AD))) { 2583 + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2584 + protocol = veh->h_vlan_encapsulated_proto; 2585 + } else { 2586 + goto finalize; 2587 + } 2579 2588 } 2580 2589 2581 2590 features = netdev_intersect_features(features, ··· 2601 2590 NETIF_F_GEN_CSUM | 2602 2591 
NETIF_F_HW_VLAN_CTAG_TX | 2603 2592 NETIF_F_HW_VLAN_STAG_TX); 2593 + 2594 + finalize: 2595 + if (dev->netdev_ops->ndo_features_check) 2596 + features &= dev->netdev_ops->ndo_features_check(skb, dev, 2597 + features); 2604 2598 2605 2599 return harmonize_features(skb, features); 2606 2600 } ··· 2677 2661 if (unlikely(!skb)) 2678 2662 goto out_null; 2679 2663 2680 - /* If encapsulation offload request, verify we are testing 2681 - * hardware encapsulation features instead of standard 2682 - * features for the netdev 2683 - */ 2684 - if (skb->encapsulation) 2685 - features &= dev->hw_enc_features; 2686 - 2687 2664 if (netif_needs_gso(dev, skb, features)) { 2688 2665 struct sk_buff *segs; 2689 2666 2690 2667 segs = skb_gso_segment(skb, features); 2691 2668 if (IS_ERR(segs)) { 2692 - segs = NULL; 2669 + goto out_kfree_skb; 2693 2670 } else if (segs) { 2694 2671 consume_skb(skb); 2695 2672 skb = segs; ··· 4566 4557 } 4567 4558 EXPORT_SYMBOL(netif_napi_del); 4568 4559 4560 + static int napi_poll(struct napi_struct *n, struct list_head *repoll) 4561 + { 4562 + void *have; 4563 + int work, weight; 4564 + 4565 + list_del_init(&n->poll_list); 4566 + 4567 + have = netpoll_poll_lock(n); 4568 + 4569 + weight = n->weight; 4570 + 4571 + /* This NAPI_STATE_SCHED test is for avoiding a race 4572 + * with netpoll's poll_napi(). Only the entity which 4573 + * obtains the lock and sees NAPI_STATE_SCHED set will 4574 + * actually make the ->poll() call. Therefore we avoid 4575 + * accidentally calling ->poll() when NAPI is not scheduled. 4576 + */ 4577 + work = 0; 4578 + if (test_bit(NAPI_STATE_SCHED, &n->state)) { 4579 + work = n->poll(n, weight); 4580 + trace_napi_poll(n); 4581 + } 4582 + 4583 + WARN_ON_ONCE(work > weight); 4584 + 4585 + if (likely(work < weight)) 4586 + goto out_unlock; 4587 + 4588 + /* Drivers must not modify the NAPI state if they 4589 + * consume the entire weight. 
In such cases this code 4590 + * still "owns" the NAPI instance and therefore can 4591 + * move the instance around on the list at-will. 4592 + */ 4593 + if (unlikely(napi_disable_pending(n))) { 4594 + napi_complete(n); 4595 + goto out_unlock; 4596 + } 4597 + 4598 + if (n->gro_list) { 4599 + /* flush too old packets 4600 + * If HZ < 1000, flush all packets. 4601 + */ 4602 + napi_gro_flush(n, HZ >= 1000); 4603 + } 4604 + 4605 + /* Some drivers may have called napi_schedule 4606 + * prior to exhausting their budget. 4607 + */ 4608 + if (unlikely(!list_empty(&n->poll_list))) { 4609 + pr_warn_once("%s: Budget exhausted after napi rescheduled\n", 4610 + n->dev ? n->dev->name : "backlog"); 4611 + goto out_unlock; 4612 + } 4613 + 4614 + list_add_tail(&n->poll_list, repoll); 4615 + 4616 + out_unlock: 4617 + netpoll_poll_unlock(have); 4618 + 4619 + return work; 4620 + } 4621 + 4569 4622 static void net_rx_action(struct softirq_action *h) 4570 4623 { 4571 4624 struct softnet_data *sd = this_cpu_ptr(&softnet_data); ··· 4635 4564 int budget = netdev_budget; 4636 4565 LIST_HEAD(list); 4637 4566 LIST_HEAD(repoll); 4638 - void *have; 4639 4567 4640 4568 local_irq_disable(); 4641 4569 list_splice_init(&sd->poll_list, &list); 4642 4570 local_irq_enable(); 4643 4571 4644 - while (!list_empty(&list)) { 4572 + for (;;) { 4645 4573 struct napi_struct *n; 4646 - int work, weight; 4574 + 4575 + if (list_empty(&list)) { 4576 + if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll)) 4577 + return; 4578 + break; 4579 + } 4580 + 4581 + n = list_first_entry(&list, struct napi_struct, poll_list); 4582 + budget -= napi_poll(n, &repoll); 4647 4583 4648 4584 /* If softirq window is exhausted then punt. 4649 4585 * Allow this to run for 2 jiffies since which will allow 4650 4586 * an average latency of 1.5/HZ. 
4651 4587 */ 4652 - if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) 4653 - goto softnet_break; 4654 - 4655 - 4656 - n = list_first_entry(&list, struct napi_struct, poll_list); 4657 - list_del_init(&n->poll_list); 4658 - 4659 - have = netpoll_poll_lock(n); 4660 - 4661 - weight = n->weight; 4662 - 4663 - /* This NAPI_STATE_SCHED test is for avoiding a race 4664 - * with netpoll's poll_napi(). Only the entity which 4665 - * obtains the lock and sees NAPI_STATE_SCHED set will 4666 - * actually make the ->poll() call. Therefore we avoid 4667 - * accidentally calling ->poll() when NAPI is not scheduled. 4668 - */ 4669 - work = 0; 4670 - if (test_bit(NAPI_STATE_SCHED, &n->state)) { 4671 - work = n->poll(n, weight); 4672 - trace_napi_poll(n); 4588 + if (unlikely(budget <= 0 || 4589 + time_after_eq(jiffies, time_limit))) { 4590 + sd->time_squeeze++; 4591 + break; 4673 4592 } 4674 - 4675 - WARN_ON_ONCE(work > weight); 4676 - 4677 - budget -= work; 4678 - 4679 - /* Drivers must not modify the NAPI state if they 4680 - * consume the entire weight. In such cases this code 4681 - * still "owns" the NAPI instance and therefore can 4682 - * move the instance around on the list at-will. 4683 - */ 4684 - if (unlikely(work == weight)) { 4685 - if (unlikely(napi_disable_pending(n))) { 4686 - napi_complete(n); 4687 - } else { 4688 - if (n->gro_list) { 4689 - /* flush too old packets 4690 - * If HZ < 1000, flush all packets. 
4691 - */ 4692 - napi_gro_flush(n, HZ >= 1000); 4693 - } 4694 - list_add_tail(&n->poll_list, &repoll); 4695 - } 4696 - } 4697 - 4698 - netpoll_poll_unlock(have); 4699 4593 } 4700 4594 4701 - if (!sd_has_rps_ipi_waiting(sd) && 4702 - list_empty(&list) && 4703 - list_empty(&repoll)) 4704 - return; 4705 - out: 4706 4595 local_irq_disable(); 4707 4596 4708 4597 list_splice_tail_init(&sd->poll_list, &list); ··· 4672 4641 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 4673 4642 4674 4643 net_rps_action_and_irq_enable(sd); 4675 - 4676 - return; 4677 - 4678 - softnet_break: 4679 - sd->time_squeeze++; 4680 - goto out; 4681 4644 } 4682 4645 4683 4646 struct netdev_adjacent {
+44
net/core/neighbour.c
··· 2043 2043 case NDTPA_BASE_REACHABLE_TIME: 2044 2044 NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, 2045 2045 nla_get_msecs(tbp[i])); 2046 + /* update reachable_time as well, otherwise, the change will 2047 + * only be effective after the next time neigh_periodic_work 2048 + * decides to recompute it (can be multiple minutes) 2049 + */ 2050 + p->reachable_time = 2051 + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2046 2052 break; 2047 2053 case NDTPA_GC_STALETIME: 2048 2054 NEIGH_VAR_SET(p, GC_STALETIME, ··· 2927 2921 return ret; 2928 2922 } 2929 2923 2924 + static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, 2925 + void __user *buffer, 2926 + size_t *lenp, loff_t *ppos) 2927 + { 2928 + struct neigh_parms *p = ctl->extra2; 2929 + int ret; 2930 + 2931 + if (strcmp(ctl->procname, "base_reachable_time") == 0) 2932 + ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); 2933 + else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) 2934 + ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); 2935 + else 2936 + ret = -1; 2937 + 2938 + if (write && ret == 0) { 2939 + /* update reachable_time as well, otherwise, the change will 2940 + * only be effective after the next time neigh_periodic_work 2941 + * decides to recompute it 2942 + */ 2943 + p->reachable_time = 2944 + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); 2945 + } 2946 + return ret; 2947 + } 2948 + 2930 2949 #define NEIGH_PARMS_DATA_OFFSET(index) \ 2931 2950 (&((struct neigh_parms *) 0)->data[index]) 2932 2951 ··· 3078 3047 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; 3079 3048 /* ReachableTime (in milliseconds) */ 3080 3049 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; 3050 + } else { 3051 + /* Those handlers will update p->reachable_time after 3052 + * base_reachable_time(_ms) is set to ensure the new timer starts being 3053 + * applied after the next neighbour update instead of waiting 
for 3054 + * neigh_periodic_work to update its value (can be multiple minutes) 3055 + * So any handler that replaces them should do this as well 3056 + */ 3057 + /* ReachableTime */ 3058 + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = 3059 + neigh_proc_base_reachable_time; 3060 + /* ReachableTime (in milliseconds) */ 3061 + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = 3062 + neigh_proc_base_reachable_time; 3081 3063 } 3082 3064 3083 3065 /* Don't export sysctls to unprivileged users */
+1
net/core/skbuff.c
··· 4148 4148 skb->ignore_df = 0; 4149 4149 skb_dst_drop(skb); 4150 4150 skb->mark = 0; 4151 + skb_init_secmark(skb); 4151 4152 secpath_reset(skb); 4152 4153 nf_reset(skb); 4153 4154 nf_reset_trace(skb);
+5 -1
net/ipv4/geneve.c
··· 122 122 int err; 123 123 124 124 skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx); 125 + if (IS_ERR(skb)) 126 + return PTR_ERR(skb); 125 127 126 128 min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len 127 129 + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr) 128 130 + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 129 131 130 132 err = skb_cow_head(skb, min_headroom); 131 - if (unlikely(err)) 133 + if (unlikely(err)) { 134 + kfree_skb(skb); 132 135 return err; 136 + } 133 137 134 138 skb = vlan_hwaccel_push_inside(skb); 135 139 if (unlikely(!skb))
+4 -4
net/ipv4/netfilter/nft_redir_ipv4.c
··· 27 27 28 28 memset(&mr, 0, sizeof(mr)); 29 29 if (priv->sreg_proto_min) { 30 - mr.range[0].min.all = (__force __be16) 31 - data[priv->sreg_proto_min].data[0]; 32 - mr.range[0].max.all = (__force __be16) 33 - data[priv->sreg_proto_max].data[0]; 30 + mr.range[0].min.all = 31 + *(__be16 *)&data[priv->sreg_proto_min].data[0]; 32 + mr.range[0].max.all = 33 + *(__be16 *)&data[priv->sreg_proto_max].data[0]; 34 34 mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 35 35 } 36 36
+2 -2
net/ipv4/tcp_output.c
··· 2019 2019 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 2020 2020 break; 2021 2021 2022 - if (tso_segs == 1) { 2022 + if (tso_segs == 1 || !max_segs) { 2023 2023 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 2024 2024 (tcp_skb_is_last(sk, skb) ? 2025 2025 nonagle : TCP_NAGLE_PUSH)))) ··· 2032 2032 } 2033 2033 2034 2034 limit = mss_now; 2035 - if (tso_segs > 1 && !tcp_urg_mode(tp)) 2035 + if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) 2036 2036 limit = tcp_mss_split_point(sk, skb, mss_now, 2037 2037 min_t(unsigned int, 2038 2038 cwnd_quota,
+4 -4
net/ipv6/netfilter/nft_redir_ipv6.c
··· 27 27 28 28 memset(&range, 0, sizeof(range)); 29 29 if (priv->sreg_proto_min) { 30 - range.min_proto.all = (__force __be16) 31 - data[priv->sreg_proto_min].data[0]; 32 - range.max_proto.all = (__force __be16) 33 - data[priv->sreg_proto_max].data[0]; 30 + range.min_proto.all = 31 + *(__be16 *)&data[priv->sreg_proto_min].data[0]; 32 + range.max_proto.all = 33 + *(__be16 *)&data[priv->sreg_proto_max].data[0]; 34 34 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 35 35 } 36 36
+29 -16
net/ipv6/tcp_ipv6.c
··· 1387 1387 return 0; 1388 1388 } 1389 1389 1390 + static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, 1391 + const struct tcphdr *th) 1392 + { 1393 + /* This is tricky: we move IP6CB at its correct location into 1394 + * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because 1395 + * _decode_session6() uses IP6CB(). 1396 + * barrier() makes sure compiler won't play aliasing games. 1397 + */ 1398 + memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), 1399 + sizeof(struct inet6_skb_parm)); 1400 + barrier(); 1401 + 1402 + TCP_SKB_CB(skb)->seq = ntohl(th->seq); 1403 + TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + 1404 + skb->len - th->doff*4); 1405 + TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1406 + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); 1407 + TCP_SKB_CB(skb)->tcp_tw_isn = 0; 1408 + TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); 1409 + TCP_SKB_CB(skb)->sacked = 0; 1410 + } 1411 + 1390 1412 static int tcp_v6_rcv(struct sk_buff *skb) 1391 1413 { 1392 1414 const struct tcphdr *th; ··· 1440 1418 1441 1419 th = tcp_hdr(skb); 1442 1420 hdr = ipv6_hdr(skb); 1443 - /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() 1444 - * barrier() makes sure compiler wont play fool^Waliasing games. 
1445 - */ 1446 - memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), 1447 - sizeof(struct inet6_skb_parm)); 1448 - barrier(); 1449 - 1450 - TCP_SKB_CB(skb)->seq = ntohl(th->seq); 1451 - TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + 1452 - skb->len - th->doff*4); 1453 - TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); 1454 - TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); 1455 - TCP_SKB_CB(skb)->tcp_tw_isn = 0; 1456 - TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); 1457 - TCP_SKB_CB(skb)->sacked = 0; 1458 1421 1459 1422 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest, 1460 - tcp_v6_iif(skb)); 1423 + inet6_iif(skb)); 1461 1424 if (!sk) 1462 1425 goto no_tcp_socket; 1463 1426 ··· 1457 1450 1458 1451 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 1459 1452 goto discard_and_relse; 1453 + 1454 + tcp_v6_fill_cb(skb, hdr, th); 1460 1455 1461 1456 #ifdef CONFIG_TCP_MD5SIG 1462 1457 if (tcp_v6_inbound_md5_hash(sk, skb)) ··· 1491 1482 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 1492 1483 goto discard_it; 1493 1484 1485 + tcp_v6_fill_cb(skb, hdr, th); 1486 + 1494 1487 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1495 1488 csum_error: 1496 1489 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); ··· 1515 1504 inet_twsk_put(inet_twsk(sk)); 1516 1505 goto discard_it; 1517 1506 } 1507 + 1508 + tcp_v6_fill_cb(skb, hdr, th); 1518 1509 1519 1510 if (skb->len < (th->doff<<2)) { 1520 1511 inet_twsk_put(inet_twsk(sk));
+9 -3
net/mac80211/key.c
··· 140 140 if (!ret) { 141 141 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 142 142 143 - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 143 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 144 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || 145 + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 144 146 sdata->crypto_tx_tailroom_needed_cnt--; 145 147 146 148 WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && ··· 190 188 sta = key->sta; 191 189 sdata = key->sdata; 192 190 193 - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 191 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 192 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || 193 + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 194 194 increment_tailroom_need_count(sdata); 195 195 196 196 ret = drv_set_key(key->local, DISABLE_KEY, sdata, ··· 888 884 if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 889 885 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 890 886 891 - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) 887 + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || 888 + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || 889 + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) 892 890 increment_tailroom_need_count(key->sdata); 893 891 } 894 892
+1 -4
net/mpls/mpls_gso.c
··· 31 31 SKB_GSO_TCPV6 | 32 32 SKB_GSO_UDP | 33 33 SKB_GSO_DODGY | 34 - SKB_GSO_TCP_ECN | 35 - SKB_GSO_GRE | 36 - SKB_GSO_GRE_CSUM | 37 - SKB_GSO_IPIP))) 34 + SKB_GSO_TCP_ECN))) 38 35 goto out; 39 36 40 37 /* Setup inner SKB. */
+5 -5
net/netfilter/ipvs/ip_vs_ftp.c
··· 183 183 struct nf_conn *ct; 184 184 struct net *net; 185 185 186 + *diff = 0; 187 + 186 188 #ifdef CONFIG_IP_VS_IPV6 187 189 /* This application helper doesn't work with IPv6 yet, 188 190 * so turn this into a no-op for IPv6 packets ··· 192 190 if (cp->af == AF_INET6) 193 191 return 1; 194 192 #endif 195 - 196 - *diff = 0; 197 193 198 194 /* Only useful for established sessions */ 199 195 if (cp->state != IP_VS_TCP_S_ESTABLISHED) ··· 322 322 struct ip_vs_conn *n_cp; 323 323 struct net *net; 324 324 325 + /* no diff required for incoming packets */ 326 + *diff = 0; 327 + 325 328 #ifdef CONFIG_IP_VS_IPV6 326 329 /* This application helper doesn't work with IPv6 yet, 327 330 * so turn this into a no-op for IPv6 packets ··· 332 329 if (cp->af == AF_INET6) 333 330 return 1; 334 331 #endif 335 - 336 - /* no diff required for incoming packets */ 337 - *diff = 0; 338 332 339 333 /* Only useful for established sessions */ 340 334 if (cp->state != IP_VS_TCP_S_ESTABLISHED)
+9 -11
net/netfilter/nf_conntrack_core.c
··· 611 611 */ 612 612 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 613 613 pr_debug("Confirming conntrack %p\n", ct); 614 - /* We have to check the DYING flag inside the lock to prevent 615 - a race against nf_ct_get_next_corpse() possibly called from 616 - user context, else we insert an already 'dead' hash, blocking 617 - further use of that particular connection -JM */ 614 + /* We have to check the DYING flag after unlink to prevent 615 + * a race against nf_ct_get_next_corpse() possibly called from 616 + * user context, else we insert an already 'dead' hash, blocking 617 + * further use of that particular connection -JM. 618 + */ 619 + nf_ct_del_from_dying_or_unconfirmed_list(ct); 618 620 619 - if (unlikely(nf_ct_is_dying(ct))) { 620 - nf_conntrack_double_unlock(hash, reply_hash); 621 - local_bh_enable(); 622 - return NF_ACCEPT; 623 - } 621 + if (unlikely(nf_ct_is_dying(ct))) 622 + goto out; 624 623 625 624 /* See if there's one in the list already, including reverse: 626 625 NAT could have grabbed it without realizing, since we're ··· 634 635 &h->tuple) && 635 636 zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) 636 637 goto out; 637 - 638 - nf_ct_del_from_dying_or_unconfirmed_list(ct); 639 638 640 639 /* Timer relative to confirmation time, not original 641 640 setting time, otherwise we'd get timer wrap in ··· 670 673 return NF_ACCEPT; 671 674 672 675 out: 676 + nf_ct_add_to_dying_list(ct); 673 677 nf_conntrack_double_unlock(hash, reply_hash); 674 678 NF_CT_STAT_INC(net, insert_failed); 675 679 local_bh_enable();
+9 -5
net/netfilter/nf_tables_api.c
··· 713 713 struct nft_chain *chain, *nc; 714 714 struct nft_set *set, *ns; 715 715 716 - list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { 716 + list_for_each_entry(chain, &ctx->table->chains, list) { 717 717 ctx->chain = chain; 718 718 719 719 err = nft_delrule_by_chain(ctx); 720 - if (err < 0) 721 - goto out; 722 - 723 - err = nft_delchain(ctx); 724 720 if (err < 0) 725 721 goto out; 726 722 } ··· 727 731 continue; 728 732 729 733 err = nft_delset(ctx, set); 734 + if (err < 0) 735 + goto out; 736 + } 737 + 738 + list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { 739 + ctx->chain = chain; 740 + 741 + err = nft_delchain(ctx); 730 742 if (err < 0) 731 743 goto out; 732 744 }
+4 -3
net/netfilter/nfnetlink.c
··· 321 321 nlh = nlmsg_hdr(skb); 322 322 err = 0; 323 323 324 - if (nlh->nlmsg_len < NLMSG_HDRLEN) { 324 + if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || 325 + skb->len < nlh->nlmsg_len) { 325 326 err = -EINVAL; 326 327 goto ack; 327 328 } ··· 464 463 } 465 464 466 465 #ifdef CONFIG_MODULES 467 - static int nfnetlink_bind(int group) 466 + static int nfnetlink_bind(struct net *net, int group) 468 467 { 469 468 const struct nfnetlink_subsystem *ss; 470 469 int type; 471 470 472 471 if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) 473 - return -EINVAL; 472 + return 0; 474 473 475 474 type = nfnl_group2type[group]; 476 475
+4 -4
net/netfilter/nft_nat.c
··· 65 65 } 66 66 67 67 if (priv->sreg_proto_min) { 68 - range.min_proto.all = (__force __be16) 69 - data[priv->sreg_proto_min].data[0]; 70 - range.max_proto.all = (__force __be16) 71 - data[priv->sreg_proto_max].data[0]; 68 + range.min_proto.all = 69 + *(__be16 *)&data[priv->sreg_proto_min].data[0]; 70 + range.max_proto.all = 71 + *(__be16 *)&data[priv->sreg_proto_max].data[0]; 72 72 range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; 73 73 } 74 74
+23 -15
net/netlink/af_netlink.c
··· 1091 1091 mutex_unlock(&nl_sk_hash_lock); 1092 1092 1093 1093 netlink_table_grab(); 1094 - if (nlk_sk(sk)->subscriptions) 1094 + if (nlk_sk(sk)->subscriptions) { 1095 1095 __sk_del_bind_node(sk); 1096 + netlink_update_listeners(sk); 1097 + } 1096 1098 netlink_table_ungrab(); 1097 1099 } 1098 1100 ··· 1141 1139 struct module *module = NULL; 1142 1140 struct mutex *cb_mutex; 1143 1141 struct netlink_sock *nlk; 1144 - int (*bind)(int group); 1145 - void (*unbind)(int group); 1142 + int (*bind)(struct net *net, int group); 1143 + void (*unbind)(struct net *net, int group); 1146 1144 int err = 0; 1147 1145 1148 1146 sock->state = SS_UNCONNECTED; ··· 1228 1226 1229 1227 module_put(nlk->module); 1230 1228 1231 - netlink_table_grab(); 1232 1229 if (netlink_is_kernel(sk)) { 1230 + netlink_table_grab(); 1233 1231 BUG_ON(nl_table[sk->sk_protocol].registered == 0); 1234 1232 if (--nl_table[sk->sk_protocol].registered == 0) { 1235 1233 struct listeners *old; ··· 1243 1241 nl_table[sk->sk_protocol].flags = 0; 1244 1242 nl_table[sk->sk_protocol].registered = 0; 1245 1243 } 1246 - } else if (nlk->subscriptions) { 1247 - netlink_update_listeners(sk); 1244 + netlink_table_ungrab(); 1248 1245 } 1249 - netlink_table_ungrab(); 1250 1246 1247 + if (nlk->netlink_unbind) { 1248 + int i; 1249 + 1250 + for (i = 0; i < nlk->ngroups; i++) 1251 + if (test_bit(i, nlk->groups)) 1252 + nlk->netlink_unbind(sock_net(sk), i + 1); 1253 + } 1251 1254 kfree(nlk->groups); 1252 1255 nlk->groups = NULL; 1253 1256 ··· 1417 1410 return err; 1418 1411 } 1419 1412 1420 - static void netlink_unbind(int group, long unsigned int groups, 1421 - struct netlink_sock *nlk) 1413 + static void netlink_undo_bind(int group, long unsigned int groups, 1414 + struct sock *sk) 1422 1415 { 1416 + struct netlink_sock *nlk = nlk_sk(sk); 1423 1417 int undo; 1424 1418 1425 1419 if (!nlk->netlink_unbind) ··· 1428 1420 1429 1421 for (undo = 0; undo < group; undo++) 1430 1422 if (test_bit(undo, &groups)) 1431 - 
nlk->netlink_unbind(undo); 1423 + nlk->netlink_unbind(sock_net(sk), undo); 1432 1424 } 1433 1425 1434 1426 static int netlink_bind(struct socket *sock, struct sockaddr *addr, ··· 1466 1458 for (group = 0; group < nlk->ngroups; group++) { 1467 1459 if (!test_bit(group, &groups)) 1468 1460 continue; 1469 - err = nlk->netlink_bind(group); 1461 + err = nlk->netlink_bind(net, group); 1470 1462 if (!err) 1471 1463 continue; 1472 - netlink_unbind(group, groups, nlk); 1464 + netlink_undo_bind(group, groups, sk); 1473 1465 return err; 1474 1466 } 1475 1467 } ··· 1479 1471 netlink_insert(sk, net, nladdr->nl_pid) : 1480 1472 netlink_autobind(sock); 1481 1473 if (err) { 1482 - netlink_unbind(nlk->ngroups, groups, nlk); 1474 + netlink_undo_bind(nlk->ngroups, groups, sk); 1483 1475 return err; 1484 1476 } 1485 1477 } ··· 2130 2122 if (!val || val - 1 >= nlk->ngroups) 2131 2123 return -EINVAL; 2132 2124 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) { 2133 - err = nlk->netlink_bind(val); 2125 + err = nlk->netlink_bind(sock_net(sk), val); 2134 2126 if (err) 2135 2127 return err; 2136 2128 } ··· 2139 2131 optname == NETLINK_ADD_MEMBERSHIP); 2140 2132 netlink_table_ungrab(); 2141 2133 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind) 2142 - nlk->netlink_unbind(val); 2134 + nlk->netlink_unbind(sock_net(sk), val); 2143 2135 2144 2136 err = 0; 2145 2137 break;
+4 -4
net/netlink/af_netlink.h
··· 39 39 struct mutex *cb_mutex; 40 40 struct mutex cb_def_mutex; 41 41 void (*netlink_rcv)(struct sk_buff *skb); 42 - int (*netlink_bind)(int group); 43 - void (*netlink_unbind)(int group); 42 + int (*netlink_bind)(struct net *net, int group); 43 + void (*netlink_unbind)(struct net *net, int group); 44 44 struct module *module; 45 45 #ifdef CONFIG_NETLINK_MMAP 46 46 struct mutex pg_vec_lock; ··· 65 65 unsigned int groups; 66 66 struct mutex *cb_mutex; 67 67 struct module *module; 68 - int (*bind)(int group); 69 - void (*unbind)(int group); 68 + int (*bind)(struct net *net, int group); 69 + void (*unbind)(struct net *net, int group); 70 70 bool (*compare)(struct net *net, struct sock *sock); 71 71 int registered; 72 72 };
+56
net/netlink/genetlink.c
··· 983 983 { .name = "notify", }, 984 984 }; 985 985 986 + static int genl_bind(struct net *net, int group) 987 + { 988 + int i, err = 0; 989 + 990 + down_read(&cb_lock); 991 + for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { 992 + struct genl_family *f; 993 + 994 + list_for_each_entry(f, genl_family_chain(i), family_list) { 995 + if (group >= f->mcgrp_offset && 996 + group < f->mcgrp_offset + f->n_mcgrps) { 997 + int fam_grp = group - f->mcgrp_offset; 998 + 999 + if (!f->netnsok && net != &init_net) 1000 + err = -ENOENT; 1001 + else if (f->mcast_bind) 1002 + err = f->mcast_bind(net, fam_grp); 1003 + else 1004 + err = 0; 1005 + break; 1006 + } 1007 + } 1008 + } 1009 + up_read(&cb_lock); 1010 + 1011 + return err; 1012 + } 1013 + 1014 + static void genl_unbind(struct net *net, int group) 1015 + { 1016 + int i; 1017 + bool found = false; 1018 + 1019 + down_read(&cb_lock); 1020 + for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { 1021 + struct genl_family *f; 1022 + 1023 + list_for_each_entry(f, genl_family_chain(i), family_list) { 1024 + if (group >= f->mcgrp_offset && 1025 + group < f->mcgrp_offset + f->n_mcgrps) { 1026 + int fam_grp = group - f->mcgrp_offset; 1027 + 1028 + if (f->mcast_unbind) 1029 + f->mcast_unbind(net, fam_grp); 1030 + found = true; 1031 + break; 1032 + } 1033 + } 1034 + } 1035 + up_read(&cb_lock); 1036 + 1037 + WARN_ON(!found); 1038 + } 1039 + 986 1040 static int __net_init genl_pernet_init(struct net *net) 987 1041 { 988 1042 struct netlink_kernel_cfg cfg = { 989 1043 .input = genl_rcv, 990 1044 .flags = NL_CFG_F_NONROOT_RECV, 1045 + .bind = genl_bind, 1046 + .unbind = genl_unbind, 991 1047 }; 992 1048 993 1049 /* we'll bump the group number right afterwards */
+2 -1
net/openvswitch/actions.c
··· 147 147 hdr = eth_hdr(skb); 148 148 hdr->h_proto = mpls->mpls_ethertype; 149 149 150 - skb_set_inner_protocol(skb, skb->protocol); 150 + if (!skb->inner_protocol) 151 + skb_set_inner_protocol(skb, skb->protocol); 151 152 skb->protocol = mpls->mpls_ethertype; 152 153 153 154 invalidate_flow_key(key);
+3 -3
net/openvswitch/datapath.c
··· 83 83 unsigned int group) 84 84 { 85 85 return info->nlhdr->nlmsg_flags & NLM_F_ECHO || 86 - genl_has_listeners(family, genl_info_net(info)->genl_sock, 87 - group); 86 + genl_has_listeners(family, genl_info_net(info), group); 88 87 } 89 88 90 89 static void ovs_notify(struct genl_family *family, ··· 524 525 struct vport *input_vport; 525 526 int len; 526 527 int err; 527 - bool log = !a[OVS_FLOW_ATTR_PROBE]; 528 + bool log = !a[OVS_PACKET_ATTR_PROBE]; 528 529 529 530 err = -EINVAL; 530 531 if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || ··· 610 611 [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, 611 612 [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, 612 613 [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, 614 + [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG }, 613 615 }; 614 616 615 617 static const struct genl_ops dp_packet_genl_ops[] = {
+3 -2
net/openvswitch/flow.c
··· 70 70 { 71 71 struct flow_stats *stats; 72 72 int node = numa_node_id(); 73 + int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 73 74 74 75 stats = rcu_dereference(flow->stats[node]); 75 76 ··· 106 105 if (likely(new_stats)) { 107 106 new_stats->used = jiffies; 108 107 new_stats->packet_count = 1; 109 - new_stats->byte_count = skb->len; 108 + new_stats->byte_count = len; 110 109 new_stats->tcp_flags = tcp_flags; 111 110 spin_lock_init(&new_stats->lock); 112 111 ··· 121 120 122 121 stats->used = jiffies; 123 122 stats->packet_count++; 124 - stats->byte_count += skb->len; 123 + stats->byte_count += len; 125 124 stats->tcp_flags |= tcp_flags; 126 125 unlock: 127 126 spin_unlock(&stats->lock);
+1 -12
net/openvswitch/flow_netlink.c
··· 1753 1753 __be16 eth_type, __be16 vlan_tci, bool log) 1754 1754 { 1755 1755 const struct nlattr *a; 1756 - bool out_tnl_port = false; 1757 1756 int rem, err; 1758 1757 1759 1758 if (depth >= SAMPLE_ACTION_DEPTH) ··· 1795 1796 case OVS_ACTION_ATTR_OUTPUT: 1796 1797 if (nla_get_u32(a) >= DP_MAX_PORTS) 1797 1798 return -EINVAL; 1798 - out_tnl_port = false; 1799 - 1800 1799 break; 1801 1800 1802 1801 case OVS_ACTION_ATTR_HASH: { ··· 1828 1831 1829 1832 case OVS_ACTION_ATTR_PUSH_MPLS: { 1830 1833 const struct ovs_action_push_mpls *mpls = nla_data(a); 1831 - 1832 - /* Networking stack do not allow simultaneous Tunnel 1833 - * and MPLS GSO. 1834 - */ 1835 - if (out_tnl_port) 1836 - return -EINVAL; 1837 1834 1838 1835 if (!eth_p_mpls(mpls->mpls_ethertype)) 1839 1836 return -EINVAL; ··· 1864 1873 1865 1874 case OVS_ACTION_ATTR_SET: 1866 1875 err = validate_set(a, key, sfa, 1867 - &out_tnl_port, eth_type, log); 1876 + &skip_copy, eth_type, log); 1868 1877 if (err) 1869 1878 return err; 1870 - 1871 - skip_copy = out_tnl_port; 1872 1879 break; 1873 1880 1874 1881 case OVS_ACTION_ATTR_SAMPLE:
+3
net/openvswitch/vport-geneve.c
··· 219 219 false); 220 220 if (err < 0) 221 221 ip_rt_put(rt); 222 + return err; 223 + 222 224 error: 225 + kfree_skb(skb); 223 226 return err; 224 227 } 225 228
+11 -7
net/openvswitch/vport-gre.c
··· 73 73 74 74 skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); 75 75 if (IS_ERR(skb)) 76 - return NULL; 76 + return skb; 77 77 78 78 tpi.flags = filter_tnl_flags(tun_key->tun_flags); 79 79 tpi.proto = htons(ETH_P_TEB); ··· 144 144 145 145 if (unlikely(!OVS_CB(skb)->egress_tun_info)) { 146 146 err = -EINVAL; 147 - goto error; 147 + goto err_free_skb; 148 148 } 149 149 150 150 tun_key = &OVS_CB(skb)->egress_tun_info->tunnel; ··· 157 157 fl.flowi4_proto = IPPROTO_GRE; 158 158 159 159 rt = ip_route_output_key(net, &fl); 160 - if (IS_ERR(rt)) 161 - return PTR_ERR(rt); 160 + if (IS_ERR(rt)) { 161 + err = PTR_ERR(rt); 162 + goto err_free_skb; 163 + } 162 164 163 165 tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags); 164 166 ··· 185 183 186 184 /* Push Tunnel header. */ 187 185 skb = __build_header(skb, tunnel_hlen); 188 - if (unlikely(!skb)) { 189 - err = 0; 186 + if (IS_ERR(skb)) { 187 + err = PTR_ERR(skb); 188 + skb = NULL; 190 189 goto err_free_rt; 191 190 } 192 191 ··· 201 198 tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false); 202 199 err_free_rt: 203 200 ip_rt_put(rt); 204 - error: 201 + err_free_skb: 202 + kfree_skb(skb); 205 203 return err; 206 204 } 207 205
+2
net/openvswitch/vport-vxlan.c
··· 187 187 false); 188 188 if (err < 0) 189 189 ip_rt_put(rt); 190 + return err; 190 191 error: 192 + kfree_skb(skb); 191 193 return err; 192 194 } 193 195
+3 -4
net/openvswitch/vport.c
··· 480 480 stats = this_cpu_ptr(vport->percpu_stats); 481 481 u64_stats_update_begin(&stats->syncp); 482 482 stats->rx_packets++; 483 - stats->rx_bytes += skb->len; 483 + stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); 484 484 u64_stats_update_end(&stats->syncp); 485 485 486 486 OVS_CB(skb)->input_vport = vport; ··· 519 519 u64_stats_update_end(&stats->syncp); 520 520 } else if (sent < 0) { 521 521 ovs_vport_record_error(vport, VPORT_E_TX_ERROR); 522 - kfree_skb(skb); 523 - } else 522 + } else { 524 523 ovs_vport_record_error(vport, VPORT_E_TX_DROPPED); 525 - 524 + } 526 525 return sent; 527 526 } 528 527
+8 -5
net/packet/af_packet.c
··· 785 785 786 786 struct tpacket3_hdr *last_pkt; 787 787 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1; 788 + struct sock *sk = &po->sk; 788 789 789 790 if (po->stats.stats3.tp_drops) 790 791 status |= TP_STATUS_LOSING; ··· 809 808 810 809 /* Flush the block */ 811 810 prb_flush_block(pkc1, pbd1, status); 811 + 812 + sk->sk_data_ready(sk); 812 813 813 814 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1); 814 815 } ··· 2055 2052 smp_wmb(); 2056 2053 #endif 2057 2054 2058 - if (po->tp_version <= TPACKET_V2) 2055 + if (po->tp_version <= TPACKET_V2) { 2059 2056 __packet_set_status(po, h.raw, status); 2060 - else 2057 + sk->sk_data_ready(sk); 2058 + } else { 2061 2059 prb_clear_blk_fill_status(&po->rx_ring); 2062 - 2063 - sk->sk_data_ready(sk); 2060 + } 2064 2061 2065 2062 drop_n_restore: 2066 2063 if (skb_head != skb->data && skb_shared(skb)) { ··· 2517 2514 err = -EINVAL; 2518 2515 if (sock->type == SOCK_DGRAM) { 2519 2516 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2520 - if (unlikely(offset) < 0) 2517 + if (unlikely(offset < 0)) 2521 2518 goto out_free; 2522 2519 } else { 2523 2520 if (ll_header_truncated(dev, len))
+3 -3
net/sunrpc/xdr.c
··· 606 606 struct kvec *head = buf->head; 607 607 struct kvec *tail = buf->tail; 608 608 int fraglen; 609 - int new, old; 609 + int new; 610 610 611 611 if (len > buf->len) { 612 612 WARN_ON_ONCE(1); ··· 629 629 buf->len -= fraglen; 630 630 631 631 new = buf->page_base + buf->page_len; 632 - old = new + fraglen; 633 - xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT); 632 + 633 + xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); 634 634 635 635 if (buf->page_len) { 636 636 xdr->p = page_address(*xdr->page_ptr);
+3 -2
net/tipc/bcast.c
··· 220 220 struct sk_buff *skb; 221 221 222 222 skb_queue_walk(&bcl->outqueue, skb) { 223 - if (more(buf_seqno(skb), after)) 223 + if (more(buf_seqno(skb), after)) { 224 + tipc_link_retransmit(bcl, skb, mod(to - after)); 224 225 break; 226 + } 225 227 } 226 - tipc_link_retransmit(bcl, skb, mod(to - after)); 227 228 } 228 229 229 230 /**
+1 -1
net/wireless/Kconfig
··· 175 175 Most distributions have a CRDA package. So if unsure, say N. 176 176 177 177 config CFG80211_WEXT 178 - bool 178 + bool "cfg80211 wireless extensions compatibility" 179 179 depends on CFG80211 180 180 select WEXT_CORE 181 181 help
+8 -8
scripts/Makefile.clean
··· 42 42 43 43 __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) 44 44 45 - # as clean-files is given relative to the current directory, this adds 46 - # a $(obj) prefix, except for absolute paths 45 + # clean-files is given relative to the current directory, unless it 46 + # starts with $(objtree)/ (which means "./", so do not add "./" unless 47 + # you want to delete a file from the toplevel object directory). 47 48 48 49 __clean-files := $(wildcard \ 49 - $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \ 50 - $(filter /%, $(__clean-files))) 50 + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \ 51 + $(filter $(objtree)/%, $(__clean-files))) 51 52 52 - # as clean-dirs is given relative to the current directory, this adds 53 - # a $(obj) prefix, except for absolute paths 53 + # same as clean-files 54 54 55 55 __clean-dirs := $(wildcard \ 56 - $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \ 57 - $(filter /%, $(clean-dirs))) 56 + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \ 57 + $(filter $(objtree)/%, $(clean-dirs))) 58 58 59 59 # ========================================================================== 60 60
+2 -2
security/keys/gc.c
··· 148 148 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 149 149 atomic_dec(&key->user->nikeys); 150 150 151 - key_user_put(key->user); 152 - 153 151 /* now throw away the key memory */ 154 152 if (key->type->destroy) 155 153 key->type->destroy(key); 154 + 155 + key_user_put(key->user); 156 156 157 157 kfree(key->description); 158 158
+1 -1
sound/firewire/fireworks/fireworks_transaction.c
··· 124 124 spin_lock_irq(&efw->lock); 125 125 126 126 t = (struct snd_efw_transaction *)data; 127 - length = min_t(size_t, t->length * sizeof(t->length), length); 127 + length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); 128 128 129 129 if (efw->push_ptr < efw->pull_ptr) 130 130 capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
+2
sound/pci/hda/patch_hdmi.c
··· 3353 3353 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 3354 3354 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, 3355 3355 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3356 + { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, 3356 3357 { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3357 3358 { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3358 3359 { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, ··· 3414 3413 MODULE_ALIAS("snd-hda-codec-id:10de0067"); 3415 3414 MODULE_ALIAS("snd-hda-codec-id:10de0070"); 3416 3415 MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3416 + MODULE_ALIAS("snd-hda-codec-id:10de0072"); 3417 3417 MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3418 3418 MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3419 3419 MODULE_ALIAS("snd-hda-codec-id:11069f81");
+2 -2
sound/pci/hda/patch_sigmatel.c
··· 568 568 spec->gpio_mask; 569 569 } 570 570 if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) 571 - spec->gpio_mask &= spec->gpio_mask; 572 - if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) 573 571 spec->gpio_dir &= spec->gpio_mask; 572 + if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) 573 + spec->gpio_data &= spec->gpio_mask; 574 574 if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) 575 575 spec->eapd_mask &= spec->gpio_mask; 576 576 if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
+5 -4
sound/soc/codecs/rt5677.c
··· 784 784 static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol, 785 785 struct snd_ctl_elem_value *ucontrol) 786 786 { 787 - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 788 - struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 787 + struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); 788 + struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component); 789 789 790 790 ucontrol->value.integer.value[0] = rt5677->dsp_vad_en; 791 791 ··· 795 795 static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol, 796 796 struct snd_ctl_elem_value *ucontrol) 797 797 { 798 - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 799 - struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 798 + struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); 799 + struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component); 800 + struct snd_soc_codec *codec = snd_soc_component_to_codec(component); 800 801 801 802 rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0]; 802 803
+30 -19
sound/soc/dwc/designware_i2s.c
··· 209 209 210 210 switch (config->chan_nr) { 211 211 case EIGHT_CHANNEL_SUPPORT: 212 - ch_reg = 3; 213 - break; 214 212 case SIX_CHANNEL_SUPPORT: 215 - ch_reg = 2; 216 - break; 217 213 case FOUR_CHANNEL_SUPPORT: 218 - ch_reg = 1; 219 - break; 220 214 case TWO_CHANNEL_SUPPORT: 221 - ch_reg = 0; 222 215 break; 223 216 default: 224 217 dev_err(dev->dev, "channel not supported\n"); ··· 220 227 221 228 i2s_disable_channels(dev, substream->stream); 222 229 223 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 224 - i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution); 225 - i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02); 226 - irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); 227 - i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30); 228 - i2s_write_reg(dev->i2s_base, TER(ch_reg), 1); 229 - } else { 230 - i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution); 231 - i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07); 232 - irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); 233 - i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03); 234 - i2s_write_reg(dev->i2s_base, RER(ch_reg), 1); 230 + for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) { 231 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 232 + i2s_write_reg(dev->i2s_base, TCR(ch_reg), 233 + xfer_resolution); 234 + i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02); 235 + irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); 236 + i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30); 237 + i2s_write_reg(dev->i2s_base, TER(ch_reg), 1); 238 + } else { 239 + i2s_write_reg(dev->i2s_base, RCR(ch_reg), 240 + xfer_resolution); 241 + i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07); 242 + irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg)); 243 + i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03); 244 + i2s_write_reg(dev->i2s_base, RER(ch_reg), 1); 245 + } 235 246 } 236 247 237 248 i2s_write_reg(dev->i2s_base, CCR, ccr); ··· 258 261 struct snd_soc_dai *dai) 259 262 { 260 263 
snd_soc_dai_set_dma_data(dai, substream, NULL); 264 + } 265 + 266 + static int dw_i2s_prepare(struct snd_pcm_substream *substream, 267 + struct snd_soc_dai *dai) 268 + { 269 + struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai); 270 + 271 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 272 + i2s_write_reg(dev->i2s_base, TXFFR, 1); 273 + else 274 + i2s_write_reg(dev->i2s_base, RXFFR, 1); 275 + 276 + return 0; 261 277 } 262 278 263 279 static int dw_i2s_trigger(struct snd_pcm_substream *substream, ··· 304 294 .startup = dw_i2s_startup, 305 295 .shutdown = dw_i2s_shutdown, 306 296 .hw_params = dw_i2s_hw_params, 297 + .prepare = dw_i2s_prepare, 307 298 .trigger = dw_i2s_trigger, 308 299 }; 309 300
+2 -2
sound/soc/intel/Kconfig
··· 89 89 90 90 config SND_SOC_INTEL_BYTCR_RT5640_MACH 91 91 tristate "ASoC Audio DSP Support for MID BYT Platform" 92 - depends on X86 92 + depends on X86 && I2C 93 93 select SND_SOC_RT5640 94 94 select SND_SST_MFLD_PLATFORM 95 95 select SND_SST_IPC_ACPI ··· 101 101 102 102 config SND_SOC_INTEL_CHT_BSW_RT5672_MACH 103 103 tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec" 104 - depends on X86_INTEL_LPSS 104 + depends on X86_INTEL_LPSS && I2C 105 105 select SND_SOC_RT5670 106 106 select SND_SST_MFLD_PLATFORM 107 107 select SND_SST_IPC_ACPI
+1 -1
sound/soc/intel/bytcr_dpcm_rt5640.c
··· 227 227 MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver"); 228 228 MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>"); 229 229 MODULE_LICENSE("GPL v2"); 230 - MODULE_ALIAS("platform:bytrt5640-audio"); 230 + MODULE_ALIAS("platform:bytt100_rt5640");
+5 -1
sound/soc/intel/sst-firmware.c
··· 763 763 /* does block span more than 1 section */ 764 764 if (ba->offset >= block->offset && ba->offset < block_end) { 765 765 766 + /* add block */ 767 + list_move(&block->list, &dsp->used_block_list); 768 + list_add(&block->module_list, block_list); 766 769 /* align ba to block boundary */ 767 - ba->offset = block->offset; 770 + ba->size -= block_end - ba->offset; 771 + ba->offset = block_end; 768 772 769 773 err = block_alloc_contiguous(dsp, ba, block_list); 770 774 if (err < 0)
+1 -1
sound/soc/intel/sst/sst_acpi.c
··· 343 343 } 344 344 345 345 static struct sst_machines sst_acpi_bytcr[] = { 346 - {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin", 346 + {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin", 347 347 &byt_rvp_platform_data }, 348 348 {}, 349 349 };
+2 -2
sound/soc/rockchip/rockchip_i2s.c
··· 454 454 455 455 i2s->playback_dma_data.addr = res->start + I2S_TXDR; 456 456 i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 457 - i2s->playback_dma_data.maxburst = 16; 457 + i2s->playback_dma_data.maxburst = 4; 458 458 459 459 i2s->capture_dma_data.addr = res->start + I2S_RXDR; 460 460 i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 461 - i2s->capture_dma_data.maxburst = 16; 461 + i2s->capture_dma_data.maxburst = 4; 462 462 463 463 i2s->dev = &pdev->dev; 464 464 dev_set_drvdata(&pdev->dev, i2s);
+1 -1
sound/soc/rockchip/rockchip_i2s.h
··· 127 127 #define I2S_DMACR_TDE_DISABLE (0 << I2S_DMACR_TDE_SHIFT) 128 128 #define I2S_DMACR_TDE_ENABLE (1 << I2S_DMACR_TDE_SHIFT) 129 129 #define I2S_DMACR_TDL_SHIFT 0 130 - #define I2S_DMACR_TDL(x) ((x - 1) << I2S_DMACR_TDL_SHIFT) 130 + #define I2S_DMACR_TDL(x) ((x) << I2S_DMACR_TDL_SHIFT) 131 131 #define I2S_DMACR_TDL_MASK (0x1f << I2S_DMACR_TDL_SHIFT) 132 132 133 133 /*
+5 -9
sound/soc/soc-core.c
··· 3230 3230 const char *propname) 3231 3231 { 3232 3232 struct device_node *np = card->dev->of_node; 3233 - int num_routes, old_routes; 3233 + int num_routes; 3234 3234 struct snd_soc_dapm_route *routes; 3235 3235 int i, ret; 3236 3236 ··· 3248 3248 return -EINVAL; 3249 3249 } 3250 3250 3251 - old_routes = card->num_dapm_routes; 3252 - routes = devm_kzalloc(card->dev, 3253 - (old_routes + num_routes) * sizeof(*routes), 3251 + routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes), 3254 3252 GFP_KERNEL); 3255 3253 if (!routes) { 3256 3254 dev_err(card->dev, ··· 3256 3258 return -EINVAL; 3257 3259 } 3258 3260 3259 - memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes)); 3260 - 3261 3261 for (i = 0; i < num_routes; i++) { 3262 3262 ret = of_property_read_string_index(np, propname, 3263 - 2 * i, &routes[old_routes + i].sink); 3263 + 2 * i, &routes[i].sink); 3264 3264 if (ret) { 3265 3265 dev_err(card->dev, 3266 3266 "ASoC: Property '%s' index %d could not be read: %d\n", ··· 3266 3270 return -EINVAL; 3267 3271 } 3268 3272 ret = of_property_read_string_index(np, propname, 3269 - (2 * i) + 1, &routes[old_routes + i].source); 3273 + (2 * i) + 1, &routes[i].source); 3270 3274 if (ret) { 3271 3275 dev_err(card->dev, 3272 3276 "ASoC: Property '%s' index %d could not be read: %d\n", ··· 3275 3279 } 3276 3280 } 3277 3281 3278 - card->num_dapm_routes += num_routes; 3282 + card->num_dapm_routes = num_routes; 3279 3283 card->dapm_routes = routes; 3280 3284 3281 3285 return 0;
+1 -1
sound/usb/caiaq/audio.c
··· 816 816 return -EINVAL; 817 817 } 818 818 819 - if (cdev->n_streams < 2) { 819 + if (cdev->n_streams < 1) { 820 820 dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams); 821 821 return -EINVAL; 822 822 }
+2
tools/include/asm-generic/bitops.h
··· 22 22 #error only <linux/bitops.h> can be included directly 23 23 #endif 24 24 25 + #include <asm-generic/bitops/hweight.h> 26 + 25 27 #include <asm-generic/bitops/atomic.h> 26 28 27 29 #endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
+1
tools/include/asm-generic/bitops/arch_hweight.h
··· 1 + #include "../../../../include/asm-generic/bitops/arch_hweight.h"
+1
tools/include/asm-generic/bitops/const_hweight.h
··· 1 + #include "../../../../include/asm-generic/bitops/const_hweight.h"
+7
tools/include/asm-generic/bitops/hweight.h
··· 1 + #ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ 2 + #define _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ 3 + 4 + #include <asm-generic/bitops/arch_hweight.h> 5 + #include <asm-generic/bitops/const_hweight.h> 6 + 7 + #endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ */
+6 -1
tools/include/linux/bitops.h
··· 1 1 #ifndef _TOOLS_LINUX_BITOPS_H_ 2 2 #define _TOOLS_LINUX_BITOPS_H_ 3 3 4 + #include <asm/types.h> 4 5 #include <linux/kernel.h> 5 6 #include <linux/compiler.h> 6 - #include <asm/hweight.h> 7 7 8 8 #ifndef __WORDSIZE 9 9 #define __WORDSIZE (__SIZEOF_LONG__ * 8) ··· 18 18 #define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64)) 19 19 #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) 20 20 #define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE) 21 + 22 + extern unsigned int __sw_hweight8(unsigned int w); 23 + extern unsigned int __sw_hweight16(unsigned int w); 24 + extern unsigned int __sw_hweight32(unsigned int w); 25 + extern unsigned long __sw_hweight64(__u64 w); 21 26 22 27 /* 23 28 * Include this here because some architectures need generic_ffs/fls in
+1 -1
tools/lib/api/fs/debugfs.c
··· 67 67 68 68 if (statfs(debugfs, &st_fs) < 0) 69 69 return -ENOENT; 70 - else if (st_fs.f_type != (long) DEBUGFS_MAGIC) 70 + else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC) 71 71 return -ENOENT; 72 72 73 73 return 0;
+1 -1
tools/lib/api/fs/fs.c
··· 79 79 80 80 if (statfs(fs, &st_fs) < 0) 81 81 return -ENOENT; 82 - else if (st_fs.f_type != magic) 82 + else if ((long)st_fs.f_type != magic) 83 83 return -ENOENT; 84 84 85 85 return 0;
+2 -2
tools/lib/lockdep/preload.c
··· 317 317 * 318 318 * TODO: Hook into free() and add that check there as well. 319 319 */ 320 - debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex)); 320 + debug_check_no_locks_freed(mutex, sizeof(*mutex)); 321 321 __del_lock(__get_lock(mutex)); 322 322 return ll_pthread_mutex_destroy(mutex); 323 323 } ··· 341 341 { 342 342 try_init_preload(); 343 343 344 - debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock)); 344 + debug_check_no_locks_freed(rwlock, sizeof(*rwlock)); 345 345 __del_lock(__get_lock(rwlock)); 346 346 return ll_pthread_rwlock_destroy(rwlock); 347 347 }
+6
tools/perf/MANIFEST
··· 6 6 tools/lib/symbol/kallsyms.h 7 7 tools/lib/util/find_next_bit.c 8 8 tools/include/asm/bug.h 9 + tools/include/asm-generic/bitops/arch_hweight.h 9 10 tools/include/asm-generic/bitops/atomic.h 11 + tools/include/asm-generic/bitops/const_hweight.h 10 12 tools/include/asm-generic/bitops/__ffs.h 11 13 tools/include/asm-generic/bitops/__fls.h 12 14 tools/include/asm-generic/bitops/find.h 13 15 tools/include/asm-generic/bitops/fls64.h 14 16 tools/include/asm-generic/bitops/fls.h 17 + tools/include/asm-generic/bitops/hweight.h 15 18 tools/include/asm-generic/bitops.h 16 19 tools/include/linux/bitops.h 17 20 tools/include/linux/compiler.h ··· 22 19 tools/include/linux/hash.h 23 20 tools/include/linux/log2.h 24 21 tools/include/linux/types.h 22 + include/asm-generic/bitops/arch_hweight.h 23 + include/asm-generic/bitops/const_hweight.h 25 24 include/asm-generic/bitops/fls64.h 26 25 include/asm-generic/bitops/__fls.h 27 26 include/asm-generic/bitops/fls.h ··· 34 29 include/linux/hash.h 35 30 include/linux/stringify.h 36 31 lib/find_next_bit.c 32 + lib/hweight.c 37 33 lib/rbtree.c 38 34 include/linux/swab.h 39 35 arch/*/include/asm/unistd*.h
+9 -2
tools/perf/Makefile.perf
··· 232 232 LIB_H += ../../include/linux/stringify.h 233 233 LIB_H += util/include/linux/bitmap.h 234 234 LIB_H += ../include/linux/bitops.h 235 + LIB_H += ../include/asm-generic/bitops/arch_hweight.h 235 236 LIB_H += ../include/asm-generic/bitops/atomic.h 237 + LIB_H += ../include/asm-generic/bitops/const_hweight.h 236 238 LIB_H += ../include/asm-generic/bitops/find.h 237 239 LIB_H += ../include/asm-generic/bitops/fls64.h 238 240 LIB_H += ../include/asm-generic/bitops/fls.h 239 241 LIB_H += ../include/asm-generic/bitops/__ffs.h 240 242 LIB_H += ../include/asm-generic/bitops/__fls.h 243 + LIB_H += ../include/asm-generic/bitops/hweight.h 241 244 LIB_H += ../include/asm-generic/bitops.h 242 245 LIB_H += ../include/linux/compiler.h 243 246 LIB_H += ../include/linux/log2.h ··· 258 255 LIB_H += util/include/asm/asm-offsets.h 259 256 LIB_H += ../include/asm/bug.h 260 257 LIB_H += util/include/asm/byteorder.h 261 - LIB_H += util/include/asm/hweight.h 262 258 LIB_H += util/include/asm/swab.h 263 259 LIB_H += util/include/asm/system.h 264 260 LIB_H += util/include/asm/uaccess.h ··· 464 462 # Benchmark modules 465 463 BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o 466 464 BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o 467 - ifeq ($(RAW_ARCH),x86_64) 465 + ifeq ($(ARCH), x86) 466 + ifeq ($(IS_64_BIT), 1) 468 467 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o 469 468 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o 469 + endif 470 470 endif 471 471 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o 472 472 BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o ··· 745 741 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $< 746 742 747 743 $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS 744 + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 745 + 746 + $(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS 748 747 $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< 
749 748 750 749 $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
+11 -8
tools/perf/arch/powerpc/util/skip-callchain-idx.c
··· 103 103 return NULL; 104 104 } 105 105 106 - result = dwarf_cfi_addrframe(cfi, pc, &frame); 106 + result = dwarf_cfi_addrframe(cfi, pc-bias, &frame); 107 107 if (result) { 108 108 pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); 109 109 return NULL; ··· 128 128 return NULL; 129 129 } 130 130 131 - result = dwarf_cfi_addrframe(cfi, pc, &frame); 131 + result = dwarf_cfi_addrframe(cfi, pc-bias, &frame); 132 132 if (result) { 133 133 pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); 134 134 return NULL; ··· 145 145 * yet used) 146 146 * -1 in case of errors 147 147 */ 148 - static int check_return_addr(struct dso *dso, Dwarf_Addr pc) 148 + static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc) 149 149 { 150 150 int rc = -1; 151 151 Dwfl *dwfl; ··· 155 155 Dwarf_Addr start = pc; 156 156 Dwarf_Addr end = pc; 157 157 bool signalp; 158 + const char *exec_file = dso->long_name; 158 159 159 160 dwfl = dso->dwfl; 160 161 ··· 166 165 return -1; 167 166 } 168 167 169 - if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) { 170 - pr_debug("dwfl_report_offline() failed %s\n", 168 + mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1, 169 + map_start, false); 170 + if (!mod) { 171 + pr_debug("dwfl_report_elf() failed %s\n", 171 172 dwarf_errmsg(-1)); 172 173 /* 173 174 * We normally cache the DWARF debug info and never ··· 259 256 return skip_slot; 260 257 } 261 258 262 - rc = check_return_addr(dso, ip); 259 + rc = check_return_addr(dso, al.map->start, ip); 263 260 264 - pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n", 265 - dso->long_name, chain->nr, ip, rc); 261 + pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n", 262 + dso->long_name, al.sym->name, ip, rc); 266 263 267 264 if (rc == 0) { 268 265 /*
+1 -1
tools/perf/bench/sched-pipe.c
··· 19 19 #include <stdlib.h> 20 20 #include <signal.h> 21 21 #include <sys/wait.h> 22 - #include <linux/unistd.h> 23 22 #include <string.h> 24 23 #include <errno.h> 25 24 #include <assert.h> 26 25 #include <sys/time.h> 27 26 #include <sys/types.h> 27 + #include <sys/syscall.h> 28 28 29 29 #include <pthread.h> 30 30
+1 -1
tools/perf/builtin-annotate.c
··· 232 232 if (nr_samples > 0) { 233 233 total_nr_samples += nr_samples; 234 234 hists__collapse_resort(hists, NULL); 235 - hists__output_resort(hists); 235 + hists__output_resort(hists, NULL); 236 236 237 237 if (symbol_conf.event_group && 238 238 !perf_evsel__is_group_leader(pos))
+45 -1
tools/perf/builtin-diff.c
··· 545 545 return __hist_entry__cmp_compute(p_left, p_right, c); 546 546 } 547 547 548 + static int64_t 549 + hist_entry__cmp_nop(struct hist_entry *left __maybe_unused, 550 + struct hist_entry *right __maybe_unused) 551 + { 552 + return 0; 553 + } 554 + 555 + static int64_t 556 + hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right) 557 + { 558 + if (sort_compute) 559 + return 0; 560 + 561 + if (left->stat.period == right->stat.period) 562 + return 0; 563 + return left->stat.period > right->stat.period ? 1 : -1; 564 + } 565 + 566 + static int64_t 567 + hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right) 568 + { 569 + return hist_entry__cmp_compute(right, left, COMPUTE_DELTA); 570 + } 571 + 572 + static int64_t 573 + hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right) 574 + { 575 + return hist_entry__cmp_compute(right, left, COMPUTE_RATIO); 576 + } 577 + 578 + static int64_t 579 + hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right) 580 + { 581 + return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF); 582 + } 583 + 548 584 static void insert_hist_entry_by_compute(struct rb_root *root, 549 585 struct hist_entry *he, 550 586 int c) ··· 641 605 hists__precompute(hists); 642 606 hists__compute_resort(hists); 643 607 } else { 644 - hists__output_resort(hists); 608 + hists__output_resort(hists, NULL); 645 609 } 646 610 647 611 hists__fprintf(hists, true, 0, 0, 0, stdout); ··· 1074 1038 fmt->header = hpp__header; 1075 1039 fmt->width = hpp__width; 1076 1040 fmt->entry = hpp__entry_global; 1041 + fmt->cmp = hist_entry__cmp_nop; 1042 + fmt->collapse = hist_entry__cmp_nop; 1077 1043 1078 1044 /* TODO more colors */ 1079 1045 switch (idx) { 1080 1046 case PERF_HPP_DIFF__BASELINE: 1081 1047 fmt->color = hpp__color_baseline; 1048 + fmt->sort = hist_entry__cmp_baseline; 1082 1049 break; 1083 1050 case PERF_HPP_DIFF__DELTA: 1084 1051 fmt->color = hpp__color_delta; 1052 + fmt->sort = 
hist_entry__cmp_delta; 1085 1053 break; 1086 1054 case PERF_HPP_DIFF__RATIO: 1087 1055 fmt->color = hpp__color_ratio; 1056 + fmt->sort = hist_entry__cmp_ratio; 1088 1057 break; 1089 1058 case PERF_HPP_DIFF__WEIGHTED_DIFF: 1090 1059 fmt->color = hpp__color_wdiff; 1060 + fmt->sort = hist_entry__cmp_wdiff; 1091 1061 break; 1092 1062 default: 1063 + fmt->sort = hist_entry__cmp_nop; 1093 1064 break; 1094 1065 } 1095 1066 1096 1067 init_header(d, dfmt); 1097 1068 perf_hpp__column_register(fmt); 1069 + perf_hpp__register_sort_field(fmt); 1098 1070 } 1099 1071 1100 1072 static void ui_init(void)
+10 -3
tools/perf/builtin-list.c
··· 19 19 int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) 20 20 { 21 21 int i; 22 - const struct option list_options[] = { 22 + bool raw_dump = false; 23 + struct option list_options[] = { 24 + OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"), 23 25 OPT_END() 24 26 }; 25 27 const char * const list_usage[] = { ··· 29 27 NULL 30 28 }; 31 29 30 + set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN); 31 + 32 32 argc = parse_options(argc, argv, list_options, list_usage, 33 33 PARSE_OPT_STOP_AT_NON_OPTION); 34 34 35 35 setup_pager(); 36 + 37 + if (raw_dump) { 38 + print_events(NULL, true); 39 + return 0; 40 + } 36 41 37 42 if (argc == 0) { 38 43 print_events(NULL, false); ··· 62 53 print_hwcache_events(NULL, false); 63 54 else if (strcmp(argv[i], "pmu") == 0) 64 55 print_pmu_events(NULL, false); 65 - else if (strcmp(argv[i], "--raw-dump") == 0) 66 - print_events(NULL, true); 67 56 else { 68 57 char *sep = strchr(argv[i], ':'), *s; 69 58 int sep_idx;
+22 -2
tools/perf/builtin-report.c
··· 457 457 ui_progress__finish(); 458 458 } 459 459 460 + static void report__output_resort(struct report *rep) 461 + { 462 + struct ui_progress prog; 463 + struct perf_evsel *pos; 464 + 465 + ui_progress__init(&prog, rep->nr_entries, "Sorting events for output..."); 466 + 467 + evlist__for_each(rep->session->evlist, pos) 468 + hists__output_resort(evsel__hists(pos), &prog); 469 + 470 + ui_progress__finish(); 471 + } 472 + 460 473 static int __cmd_report(struct report *rep) 461 474 { 462 475 int ret; ··· 518 505 if (session_done()) 519 506 return 0; 520 507 508 + /* 509 + * recalculate number of entries after collapsing since it 510 + * might be changed during the collapse phase. 511 + */ 512 + rep->nr_entries = 0; 513 + evlist__for_each(session->evlist, pos) 514 + rep->nr_entries += evsel__hists(pos)->nr_entries; 515 + 521 516 if (rep->nr_entries == 0) { 522 517 ui__error("The %s file has no samples!\n", file->path); 523 518 return 0; 524 519 } 525 520 526 - evlist__for_each(session->evlist, pos) 527 - hists__output_resort(evsel__hists(pos)); 521 + report__output_resort(rep); 528 522 529 523 return report__browse_hists(rep); 530 524 }
+2 -3
tools/perf/builtin-top.c
··· 66 66 #include <sys/utsname.h> 67 67 #include <sys/mman.h> 68 68 69 - #include <linux/unistd.h> 70 69 #include <linux/types.h> 71 70 72 71 static volatile int done; ··· 284 285 } 285 286 286 287 hists__collapse_resort(hists, NULL); 287 - hists__output_resort(hists); 288 + hists__output_resort(hists, NULL); 288 289 289 290 hists__output_recalc_col_len(hists, top->print_entries - printed); 290 291 putchar('\n'); ··· 553 554 } 554 555 555 556 hists__collapse_resort(hists, NULL); 556 - hists__output_resort(hists); 557 + hists__output_resort(hists, NULL); 557 558 } 558 559 559 560 static void *display_thread_tui(void *arg)
+1 -1
tools/perf/config/Makefile
··· 20 20 21 21 # Additional ARCH settings for x86 22 22 ifeq ($(ARCH),x86) 23 - ifeq (${IS_X86_64}, 1) 23 + ifeq (${IS_64_BIT}, 1) 24 24 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT 25 25 ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S 26 26 LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
+13 -13
tools/perf/config/Makefile.arch
··· 1 1 2 2 uname_M := $(shell uname -m 2>/dev/null || echo not) 3 3 4 - ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ 4 + RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ 5 5 -e s/arm.*/arm/ -e s/sa110/arm/ \ 6 6 -e s/s390x/s390/ -e s/parisc64/parisc/ \ 7 7 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ ··· 9 9 -e s/tile.*/tile/ ) 10 10 11 11 # Additional ARCH settings for x86 12 - ifeq ($(ARCH),i386) 13 - override ARCH := x86 12 + ifeq ($(RAW_ARCH),i386) 13 + ARCH ?= x86 14 14 endif 15 15 16 - ifeq ($(ARCH),x86_64) 17 - override ARCH := x86 18 - IS_X86_64 := 0 19 - ifeq (, $(findstring m32,$(CFLAGS))) 20 - IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1) 21 - RAW_ARCH := x86_64 16 + ifeq ($(RAW_ARCH),x86_64) 17 + ARCH ?= x86 18 + 19 + ifneq (, $(findstring m32,$(CFLAGS))) 20 + RAW_ARCH := x86_32 22 21 endif 23 22 endif 24 23 25 - ifeq (${IS_X86_64}, 1) 24 + ARCH ?= $(RAW_ARCH) 25 + 26 + LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) 27 + ifeq ($(LP64), 1) 26 28 IS_64_BIT := 1 27 - else ifeq ($(ARCH),x86) 28 - IS_64_BIT := 0 29 29 else 30 - IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) 30 + IS_64_BIT := 0 31 31 endif
-1
tools/perf/perf-sys.h
··· 6 6 #include <sys/syscall.h> 7 7 #include <linux/types.h> 8 8 #include <linux/perf_event.h> 9 - #include <asm/unistd.h> 10 9 11 10 #if defined(__i386__) 12 11 #define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+34 -2
tools/perf/tests/dwarf-unwind.c
··· 11 11 #include "thread.h" 12 12 #include "callchain.h" 13 13 14 + /* For bsearch. We try to unwind functions in shared object. */ 15 + #include <stdlib.h> 16 + 14 17 static int mmap_handler(struct perf_tool *tool __maybe_unused, 15 18 union perf_event *event, 16 19 struct perf_sample *sample __maybe_unused, ··· 31 28 mmap_handler, machine, true); 32 29 } 33 30 34 - #define MAX_STACK 6 31 + #define MAX_STACK 8 35 32 36 33 static int unwind_entry(struct unwind_entry *entry, void *arg) 37 34 { ··· 40 37 static const char *funcs[MAX_STACK] = { 41 38 "test__arch_unwind_sample", 42 39 "unwind_thread", 40 + "compare", 41 + "bsearch", 43 42 "krava_3", 44 43 "krava_2", 45 44 "krava_1", ··· 93 88 return err; 94 89 } 95 90 91 + static int global_unwind_retval = -INT_MAX; 92 + 93 + __attribute__ ((noinline)) 94 + static int compare(void *p1, void *p2) 95 + { 96 + /* Any possible value should be 'thread' */ 97 + struct thread *thread = *(struct thread **)p1; 98 + 99 + if (global_unwind_retval == -INT_MAX) 100 + global_unwind_retval = unwind_thread(thread); 101 + 102 + return p1 - p2; 103 + } 104 + 96 105 __attribute__ ((noinline)) 97 106 static int krava_3(struct thread *thread) 98 107 { 99 - return unwind_thread(thread); 108 + struct thread *array[2] = {thread, thread}; 109 + void *fp = &bsearch; 110 + /* 111 + * make _bsearch a volatile function pointer to 112 + * prevent potential optimization, which may expand 113 + * bsearch and call compare directly from this function, 114 + * instead of libc shared object. 115 + */ 116 + void *(*volatile _bsearch)(void *, void *, size_t, 117 + size_t, int (*)(void *, void *)); 118 + 119 + _bsearch = fp; 120 + _bsearch(array, &thread, 2, sizeof(struct thread **), compare); 121 + return global_unwind_retval; 100 122 } 101 123 102 124 __attribute__ ((noinline))
+34 -34
tools/perf/tests/hists_cumulate.c
··· 187 187 * function since TEST_ASSERT_VAL() returns in case of failure. 188 188 */ 189 189 hists__collapse_resort(hists, NULL); 190 - hists__output_resort(hists); 190 + hists__output_resort(hists, NULL); 191 191 192 192 if (verbose > 2) { 193 193 pr_info("use callchain: %d, cumulate callchain: %d\n", ··· 454 454 * 30.00% 10.00% perf perf [.] cmd_record 455 455 * 20.00% 0.00% bash libc [.] malloc 456 456 * 10.00% 10.00% bash [kernel] [k] page_fault 457 - * 10.00% 10.00% perf [kernel] [k] schedule 458 - * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open 459 - * 10.00% 10.00% perf [kernel] [k] page_fault 460 - * 10.00% 10.00% perf libc [.] free 461 - * 10.00% 10.00% perf libc [.] malloc 462 457 * 10.00% 10.00% bash bash [.] xmalloc 458 + * 10.00% 10.00% perf [kernel] [k] page_fault 459 + * 10.00% 10.00% perf libc [.] malloc 460 + * 10.00% 10.00% perf [kernel] [k] schedule 461 + * 10.00% 10.00% perf libc [.] free 462 + * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open 463 463 */ 464 464 struct result expected[] = { 465 465 { 7000, 2000, "perf", "perf", "main" }, ··· 468 468 { 3000, 1000, "perf", "perf", "cmd_record" }, 469 469 { 2000, 0, "bash", "libc", "malloc" }, 470 470 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 471 - { 1000, 1000, "perf", "[kernel]", "schedule" }, 472 - { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 471 + { 1000, 1000, "bash", "bash", "xmalloc" }, 473 472 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 473 + { 1000, 1000, "perf", "[kernel]", "schedule" }, 474 474 { 1000, 1000, "perf", "libc", "free" }, 475 475 { 1000, 1000, "perf", "libc", "malloc" }, 476 - { 1000, 1000, "bash", "bash", "xmalloc" }, 476 + { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 477 477 }; 478 478 479 479 symbol_conf.use_callchain = false; ··· 537 537 * malloc 538 538 * main 539 539 * 540 - * 10.00% 10.00% perf [kernel] [k] schedule 540 + * 10.00% 10.00% bash bash [.] 
xmalloc 541 541 * | 542 - * --- schedule 543 - * run_command 542 + * --- xmalloc 543 + * malloc 544 + * xmalloc <--- NOTE: there's a cycle 545 + * malloc 546 + * xmalloc 544 547 * main 545 548 * 546 549 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open ··· 556 553 * | 557 554 * --- page_fault 558 555 * sys_perf_event_open 556 + * run_command 557 + * main 558 + * 559 + * 10.00% 10.00% perf [kernel] [k] schedule 560 + * | 561 + * --- schedule 559 562 * run_command 560 563 * main 561 564 * ··· 579 570 * run_command 580 571 * main 581 572 * 582 - * 10.00% 10.00% bash bash [.] xmalloc 583 - * | 584 - * --- xmalloc 585 - * malloc 586 - * xmalloc <--- NOTE: there's a cycle 587 - * malloc 588 - * xmalloc 589 - * main 590 - * 591 573 */ 592 574 struct result expected[] = { 593 575 { 7000, 2000, "perf", "perf", "main" }, ··· 587 587 { 3000, 1000, "perf", "perf", "cmd_record" }, 588 588 { 2000, 0, "bash", "libc", "malloc" }, 589 589 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 590 - { 1000, 1000, "perf", "[kernel]", "schedule" }, 590 + { 1000, 1000, "bash", "bash", "xmalloc" }, 591 591 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 592 592 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 593 + { 1000, 1000, "perf", "[kernel]", "schedule" }, 593 594 { 1000, 1000, "perf", "libc", "free" }, 594 595 { 1000, 1000, "perf", "libc", "malloc" }, 595 - { 1000, 1000, "bash", "bash", "xmalloc" }, 596 596 }; 597 597 struct callchain_result expected_callchain[] = { 598 598 { ··· 622 622 { "bash", "main" }, }, 623 623 }, 624 624 { 625 - 3, { { "[kernel]", "schedule" }, 626 - { "perf", "run_command" }, 627 - { "perf", "main" }, }, 625 + 6, { { "bash", "xmalloc" }, 626 + { "libc", "malloc" }, 627 + { "bash", "xmalloc" }, 628 + { "libc", "malloc" }, 629 + { "bash", "xmalloc" }, 630 + { "bash", "main" }, }, 628 631 }, 629 632 { 630 633 3, { { "[kernel]", "sys_perf_event_open" }, ··· 637 634 { 638 635 4, { { "[kernel]", "page_fault" }, 639 636 { "[kernel]", 
"sys_perf_event_open" }, 637 + { "perf", "run_command" }, 638 + { "perf", "main" }, }, 639 + }, 640 + { 641 + 3, { { "[kernel]", "schedule" }, 640 642 { "perf", "run_command" }, 641 643 { "perf", "main" }, }, 642 644 }, ··· 656 648 { "perf", "cmd_record" }, 657 649 { "perf", "run_command" }, 658 650 { "perf", "main" }, }, 659 - }, 660 - { 661 - 6, { { "bash", "xmalloc" }, 662 - { "libc", "malloc" }, 663 - { "bash", "xmalloc" }, 664 - { "libc", "malloc" }, 665 - { "bash", "xmalloc" }, 666 - { "bash", "main" }, }, 667 651 }, 668 652 }; 669 653
+1 -1
tools/perf/tests/hists_filter.c
··· 138 138 struct hists *hists = evsel__hists(evsel); 139 139 140 140 hists__collapse_resort(hists, NULL); 141 - hists__output_resort(hists); 141 + hists__output_resort(hists, NULL); 142 142 143 143 if (verbose > 2) { 144 144 pr_info("Normal histogram\n");
+5 -5
tools/perf/tests/hists_output.c
··· 152 152 goto out; 153 153 154 154 hists__collapse_resort(hists, NULL); 155 - hists__output_resort(hists); 155 + hists__output_resort(hists, NULL); 156 156 157 157 if (verbose > 2) { 158 158 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 252 252 goto out; 253 253 254 254 hists__collapse_resort(hists, NULL); 255 - hists__output_resort(hists); 255 + hists__output_resort(hists, NULL); 256 256 257 257 if (verbose > 2) { 258 258 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 306 306 goto out; 307 307 308 308 hists__collapse_resort(hists, NULL); 309 - hists__output_resort(hists); 309 + hists__output_resort(hists, NULL); 310 310 311 311 if (verbose > 2) { 312 312 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 384 384 goto out; 385 385 386 386 hists__collapse_resort(hists, NULL); 387 - hists__output_resort(hists); 387 + hists__output_resort(hists, NULL); 388 388 389 389 if (verbose > 2) { 390 390 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); ··· 487 487 goto out; 488 488 489 489 hists__collapse_resort(hists, NULL); 490 - hists__output_resort(hists); 490 + hists__output_resort(hists, NULL); 491 491 492 492 if (verbose > 2) { 493 493 pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
+1 -1
tools/perf/ui/browsers/hists.c
··· 550 550 bool need_percent; 551 551 552 552 node = rb_first(root); 553 - need_percent = !!rb_next(node); 553 + need_percent = node && rb_next(node); 554 554 555 555 while (node) { 556 556 struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+3
tools/perf/ui/hist.c
··· 204 204 if (ret) 205 205 return ret; 206 206 207 + if (a->thread != b->thread || !symbol_conf.use_callchain) 208 + return 0; 209 + 207 210 ret = b->callchain->max_depth - a->callchain->max_depth; 208 211 } 209 212 return ret;
+24 -2
tools/perf/ui/tui/setup.c
··· 1 1 #include <signal.h> 2 2 #include <stdbool.h> 3 + #ifdef HAVE_BACKTRACE_SUPPORT 4 + #include <execinfo.h> 5 + #endif 3 6 4 7 #include "../../util/cache.h" 5 8 #include "../../util/debug.h" ··· 91 88 return SLkp_getkey(); 92 89 } 93 90 91 + #ifdef HAVE_BACKTRACE_SUPPORT 92 + static void ui__signal_backtrace(int sig) 93 + { 94 + void *stackdump[32]; 95 + size_t size; 96 + 97 + ui__exit(false); 98 + psignal(sig, "perf"); 99 + 100 + printf("-------- backtrace --------\n"); 101 + size = backtrace(stackdump, ARRAY_SIZE(stackdump)); 102 + backtrace_symbols_fd(stackdump, size, STDOUT_FILENO); 103 + 104 + exit(0); 105 + } 106 + #else 107 + # define ui__signal_backtrace ui__signal 108 + #endif 109 + 94 110 static void ui__signal(int sig) 95 111 { 96 112 ui__exit(false); ··· 144 122 ui_browser__init(); 145 123 tui_progress__init(); 146 124 147 - signal(SIGSEGV, ui__signal); 148 - signal(SIGFPE, ui__signal); 125 + signal(SIGSEGV, ui__signal_backtrace); 126 + signal(SIGFPE, ui__signal_backtrace); 149 127 signal(SIGINT, ui__signal); 150 128 signal(SIGQUIT, ui__signal); 151 129 signal(SIGTERM, ui__signal);
+1 -7
tools/perf/util/annotate.h
··· 116 116 struct annotated_source *src; 117 117 }; 118 118 119 - struct sannotation { 120 - struct annotation annotation; 121 - struct symbol symbol; 122 - }; 123 - 124 119 static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) 125 120 { 126 121 return (((void *)&notes->src->histograms) + ··· 124 129 125 130 static inline struct annotation *symbol__annotation(struct symbol *sym) 126 131 { 127 - struct sannotation *a = container_of(sym, struct sannotation, symbol); 128 - return &a->annotation; 132 + return (void *)sym - symbol_conf.priv_size; 129 133 } 130 134 131 135 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
+2
tools/perf/util/cache.h
··· 71 71 extern char *perf_pathdup(const char *fmt, ...) 72 72 __attribute__((format (printf, 1, 2))); 73 73 74 + #ifndef __UCLIBC__ 74 75 /* Matches the libc/libbsd function attribute so we declare this unconditionally: */ 75 76 extern size_t strlcpy(char *dest, const char *src, size_t size); 77 + #endif 76 78 77 79 #endif /* __PERF_CACHE_H */
+30
tools/perf/util/callchain.c
··· 841 841 842 842 return bf; 843 843 } 844 + 845 + static void free_callchain_node(struct callchain_node *node) 846 + { 847 + struct callchain_list *list, *tmp; 848 + struct callchain_node *child; 849 + struct rb_node *n; 850 + 851 + list_for_each_entry_safe(list, tmp, &node->val, list) { 852 + list_del(&list->list); 853 + free(list); 854 + } 855 + 856 + n = rb_first(&node->rb_root_in); 857 + while (n) { 858 + child = container_of(n, struct callchain_node, rb_node_in); 859 + n = rb_next(n); 860 + rb_erase(&child->rb_node_in, &node->rb_root_in); 861 + 862 + free_callchain_node(child); 863 + free(child); 864 + } 865 + } 866 + 867 + void free_callchain(struct callchain_root *root) 868 + { 869 + if (!symbol_conf.use_callchain) 870 + return; 871 + 872 + free_callchain_node(&root->node); 873 + }
+2
tools/perf/util/callchain.h
··· 198 198 char *callchain_list__sym_name(struct callchain_list *cl, 199 199 char *bf, size_t bfsize, bool show_dso); 200 200 201 + void free_callchain(struct callchain_root *root); 202 + 201 203 #endif /* __PERF_CALLCHAIN_H */
+14 -4
tools/perf/util/hist.c
··· 6 6 #include "evlist.h" 7 7 #include "evsel.h" 8 8 #include "annotate.h" 9 + #include "ui/progress.h" 9 10 #include <math.h> 10 11 11 12 static bool hists__filter_entry_by_dso(struct hists *hists, ··· 304 303 size_t callchain_size = 0; 305 304 struct hist_entry *he; 306 305 307 - if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) 306 + if (symbol_conf.use_callchain) 308 307 callchain_size = sizeof(struct callchain_root); 309 308 310 309 he = zalloc(sizeof(*he) + callchain_size); ··· 737 736 iter->he = he; 738 737 he_cache[iter->curr++] = he; 739 738 740 - callchain_append(he->callchain, &callchain_cursor, sample->period); 739 + hist_entry__append_callchain(he, sample); 741 740 742 741 /* 743 742 * We need to re-initialize the cursor since callchain_append() ··· 810 809 iter->he = he; 811 810 he_cache[iter->curr++] = he; 812 811 813 - callchain_append(he->callchain, &cursor, sample->period); 812 + if (symbol_conf.use_callchain) 813 + callchain_append(he->callchain, &cursor, sample->period); 814 814 return 0; 815 815 } 816 816 ··· 947 945 zfree(&he->mem_info); 948 946 zfree(&he->stat_acc); 949 947 free_srcline(he->srcline); 948 + free_callchain(he->callchain); 950 949 free(he); 951 950 } 952 951 ··· 990 987 else 991 988 p = &(*p)->rb_right; 992 989 } 990 + hists->nr_entries++; 993 991 994 992 rb_link_node(&he->rb_node_in, parent, p); 995 993 rb_insert_color(&he->rb_node_in, root); ··· 1028 1024 if (!sort__need_collapse) 1029 1025 return; 1030 1026 1027 + hists->nr_entries = 0; 1028 + 1031 1029 root = hists__get_rotate_entries_in(hists); 1030 + 1032 1031 next = rb_first(root); 1033 1032 1034 1033 while (next) { ··· 1126 1119 rb_insert_color(&he->rb_node, entries); 1127 1120 } 1128 1121 1129 - void hists__output_resort(struct hists *hists) 1122 + void hists__output_resort(struct hists *hists, struct ui_progress *prog) 1130 1123 { 1131 1124 struct rb_root *root; 1132 1125 struct rb_node *next; ··· 1155 1148 1156 1149 if (!n->filtered) 1157 1150 
hists__calc_col_len(hists, n); 1151 + 1152 + if (prog) 1153 + ui_progress__update(prog, 1); 1158 1154 } 1159 1155 } 1160 1156
+1 -1
tools/perf/util/hist.h
··· 121 121 struct hists *hists); 122 122 void hist_entry__free(struct hist_entry *); 123 123 124 - void hists__output_resort(struct hists *hists); 124 + void hists__output_resort(struct hists *hists, struct ui_progress *prog); 125 125 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); 126 126 127 127 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
-31
tools/perf/util/hweight.c
··· 1 - #include <linux/bitops.h> 2 - 3 - /** 4 - * hweightN - returns the hamming weight of a N-bit word 5 - * @x: the word to weigh 6 - * 7 - * The Hamming Weight of a number is the total number of bits set in it. 8 - */ 9 - 10 - unsigned int hweight32(unsigned int w) 11 - { 12 - unsigned int res = w - ((w >> 1) & 0x55555555); 13 - res = (res & 0x33333333) + ((res >> 2) & 0x33333333); 14 - res = (res + (res >> 4)) & 0x0F0F0F0F; 15 - res = res + (res >> 8); 16 - return (res + (res >> 16)) & 0x000000FF; 17 - } 18 - 19 - unsigned long hweight64(__u64 w) 20 - { 21 - #if BITS_PER_LONG == 32 22 - return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); 23 - #elif BITS_PER_LONG == 64 24 - __u64 res = w - ((w >> 1) & 0x5555555555555555ul); 25 - res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); 26 - res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; 27 - res = res + (res >> 8); 28 - res = res + (res >> 16); 29 - return (res + (res >> 32)) & 0x00000000000000FFul; 30 - #endif 31 - }
-8
tools/perf/util/include/asm/hweight.h
··· 1 - #ifndef PERF_HWEIGHT_H 2 - #define PERF_HWEIGHT_H 3 - 4 - #include <linux/types.h> 5 - unsigned int hweight32(unsigned int w); 6 - unsigned long hweight64(__u64 w); 7 - 8 - #endif /* PERF_HWEIGHT_H */
+3 -1
tools/perf/util/machine.c
··· 389 389 if (th != NULL) { 390 390 rb_link_node(&th->rb_node, parent, p); 391 391 rb_insert_color(&th->rb_node, &machine->threads); 392 - machine->last_match = th; 393 392 394 393 /* 395 394 * We have to initialize map_groups separately ··· 399 400 * leader and that would screwed the rb tree. 400 401 */ 401 402 if (thread__init_map_groups(th, machine)) { 403 + rb_erase(&th->rb_node, &machine->threads); 402 404 thread__delete(th); 403 405 return NULL; 404 406 } 407 + 408 + machine->last_match = th; 405 409 } 406 410 407 411 return th;
+7 -3
tools/perf/util/probe-event.c
··· 495 495 } 496 496 497 497 if (ntevs == 0) { /* No error but failed to find probe point. */ 498 - pr_warning("Probe point '%s' not found.\n", 498 + pr_warning("Probe point '%s' not found in debuginfo.\n", 499 499 synthesize_perf_probe_point(&pev->point)); 500 - return -ENOENT; 500 + if (need_dwarf) 501 + return -ENOENT; 502 + return 0; 501 503 } 502 504 /* Error path : ntevs < 0 */ 503 505 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); ··· 2052 2050 pr_debug("Writing event: %s\n", buf); 2053 2051 if (!probe_event_dry_run) { 2054 2052 ret = write(fd, buf, strlen(buf)); 2055 - if (ret <= 0) 2053 + if (ret <= 0) { 2054 + ret = -errno; 2056 2055 pr_warning("Failed to write event: %s\n", 2057 2056 strerror_r(errno, sbuf, sizeof(sbuf))); 2057 + } 2058 2058 } 2059 2059 free(buf); 2060 2060 return ret;
+17 -1
tools/perf/util/probe-finder.c
··· 989 989 int ret = 0; 990 990 991 991 #if _ELFUTILS_PREREQ(0, 142) 992 + Elf *elf; 993 + GElf_Ehdr ehdr; 994 + GElf_Shdr shdr; 995 + 992 996 /* Get the call frame information from this dwarf */ 993 - pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg)); 997 + elf = dwarf_getelf(dbg->dbg); 998 + if (elf == NULL) 999 + return -EINVAL; 1000 + 1001 + if (gelf_getehdr(elf, &ehdr) == NULL) 1002 + return -EINVAL; 1003 + 1004 + if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) && 1005 + shdr.sh_type == SHT_PROGBITS) { 1006 + pf->cfi = dwarf_getcfi_elf(elf); 1007 + } else { 1008 + pf->cfi = dwarf_getcfi(dbg->dbg); 1009 + } 994 1010 #endif 995 1011 996 1012 off = 0;
+1 -1
tools/perf/util/python-ext-sources
··· 10 10 util/evlist.c 11 11 util/evsel.c 12 12 util/cpumap.c 13 - util/hweight.c 13 + ../../lib/hweight.c 14 14 util/thread_map.c 15 15 util/util.c 16 16 util/xyarray.c
+27 -1
tools/perf/util/unwind-libunwind.c
··· 185 185 return offset; 186 186 } 187 187 188 + #ifndef NO_LIBUNWIND_DEBUG_FRAME 189 + static int elf_is_exec(int fd, const char *name) 190 + { 191 + Elf *elf; 192 + GElf_Ehdr ehdr; 193 + int retval = 0; 194 + 195 + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 196 + if (elf == NULL) 197 + return 0; 198 + if (gelf_getehdr(elf, &ehdr) == NULL) 199 + goto out; 200 + 201 + retval = (ehdr.e_type == ET_EXEC); 202 + 203 + out: 204 + elf_end(elf); 205 + pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval); 206 + return retval; 207 + } 208 + #endif 209 + 188 210 struct table_entry { 189 211 u32 start_ip_offset; 190 212 u32 fde_offset; ··· 344 322 #ifndef NO_LIBUNWIND_DEBUG_FRAME 345 323 /* Check the .debug_frame section for unwinding info */ 346 324 if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { 325 + int fd = dso__data_fd(map->dso, ui->machine); 326 + int is_exec = elf_is_exec(fd, map->dso->name); 327 + unw_word_t base = is_exec ? 0 : map->start; 328 + 347 329 memset(&di, 0, sizeof(di)); 348 - if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name, 330 + if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name, 349 331 map->start, map->end)) 350 332 return dwarf_search_unwind_table(as, ip, &di, pi, 351 333 need_unwind_info, arg);
+1 -1
tools/power/cpupower/utils/cpupower.c
··· 199 199 } 200 200 201 201 get_cpu_info(0, &cpupower_cpu_info); 202 - run_as_root = !getuid(); 202 + run_as_root = !geteuid(); 203 203 if (run_as_root) { 204 204 ret = uname(&uts); 205 205 if (!ret && !strcmp(uts.machine, "x86_64") &&
+1 -1
tools/power/cpupower/utils/helpers/sysfs.c
··· 361 361 362 362 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle"); 363 363 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode)) 364 - return -ENODEV; 364 + return 0; 365 365 366 366 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu); 367 367 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
+15 -8
tools/testing/selftests/exec/execveat.c
··· 62 62 } 63 63 64 64 static int check_execveat_invoked_rc(int fd, const char *path, int flags, 65 - int expected_rc) 65 + int expected_rc, int expected_rc2) 66 66 { 67 67 int status; 68 68 int rc; ··· 98 98 child, status); 99 99 return 1; 100 100 } 101 - if (WEXITSTATUS(status) != expected_rc) { 102 - printf("[FAIL] (child %d exited with %d not %d)\n", 103 - child, WEXITSTATUS(status), expected_rc); 101 + if ((WEXITSTATUS(status) != expected_rc) && 102 + (WEXITSTATUS(status) != expected_rc2)) { 103 + printf("[FAIL] (child %d exited with %d not %d nor %d)\n", 104 + child, WEXITSTATUS(status), expected_rc, expected_rc2); 104 105 return 1; 105 106 } 106 107 printf("[OK]\n"); ··· 110 109 111 110 static int check_execveat(int fd, const char *path, int flags) 112 111 { 113 - return check_execveat_invoked_rc(fd, path, flags, 99); 112 + return check_execveat_invoked_rc(fd, path, flags, 99, 99); 114 113 } 115 114 116 115 static char *concat(const char *left, const char *right) ··· 180 179 */ 181 180 fd = open(longpath, O_RDONLY); 182 181 if (fd > 0) { 183 - printf("Invoke copy of '%s' via filename of length %lu:\n", 182 + printf("Invoke copy of '%s' via filename of length %zu:\n", 184 183 src, strlen(longpath)); 185 184 fail += check_execveat(fd, "", AT_EMPTY_PATH); 186 185 } else { 187 - printf("Failed to open length %lu filename, errno=%d (%s)\n", 186 + printf("Failed to open length %zu filename, errno=%d (%s)\n", 188 187 strlen(longpath), errno, strerror(errno)); 189 188 fail++; 190 189 } ··· 193 192 * Execute as a long pathname relative to ".". If this is a script, 194 193 * the interpreter will launch but fail to open the script because its 195 194 * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. 
195 + * 196 + * The failure code is usually 127 (POSIX: "If a command is not found, 197 + * the exit status shall be 127."), but some systems give 126 (POSIX: 198 + * "If the command name is found, but it is not an executable utility, 199 + * the exit status shall be 126."), so allow either. 196 200 */ 197 201 if (is_script) 198 - fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127); 202 + fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 203 + 127, 126); 199 204 else 200 205 fail += check_execveat(dot_dfd, longpath, 0); 201 206
+1 -2
tools/testing/selftests/mqueue/mq_perf_tests.c
··· 536 536 { 537 537 struct mq_attr attr; 538 538 char *option, *next_option; 539 - int i, cpu; 539 + int i, cpu, rc; 540 540 struct sigaction sa; 541 541 poptContext popt_context; 542 - char rc; 543 542 void *retval; 544 543 545 544 main_thread = pthread_self();
+1 -1
tools/testing/selftests/vm/Makefile
··· 7 7 8 8 all: $(BINARIES) 9 9 %: %.c 10 - $(CC) $(CFLAGS) -o $@ $^ 10 + $(CC) $(CFLAGS) -o $@ $^ -lrt 11 11 12 12 run_tests: all 13 13 @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)