Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge 3.12-rc2 into staging-next.

This resolves the merge problem with two iio drivers that Stephen
Rothwell pointed out.

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+4055 -2271
+7 -7
Documentation/arm64/tagged-pointers.txt
··· 18 18 parameters containing user virtual addresses *must* have 19 19 their top byte cleared before trapping to the kernel. 20 20 21 - (2) Tags are not guaranteed to be preserved when delivering 22 - signals. This means that signal handlers in applications 23 - making use of tags cannot rely on the tag information for 24 - user virtual addresses being maintained for fields inside 25 - siginfo_t. One exception to this rule is for signals raised 26 - in response to debug exceptions, where the tag information 21 + (2) Non-zero tags are not preserved when delivering signals. 22 + This means that signal handlers in applications making use 23 + of tags cannot rely on the tag information for user virtual 24 + addresses being maintained for fields inside siginfo_t. 25 + One exception to this rule is for signals raised in response 26 + to watchpoint debug exceptions, where the tag information 27 27 will be preserved. 28 28 29 29 (3) Special care should be taken when using tagged pointers, 30 30 since it is likely that C compilers will not hazard two 31 - addresses differing only in the upper bits. 31 + virtual addresses differing only in the upper byte. 32 32 33 33 The architecture prevents the use of a tagged PC, so the upper byte will 34 34 be set to a sign-extension of bit 55 on exception return.
+7 -7
Documentation/filesystems/vfs.txt
··· 359 359 ssize_t (*listxattr) (struct dentry *, char *, size_t); 360 360 int (*removexattr) (struct dentry *, const char *); 361 361 void (*update_time)(struct inode *, struct timespec *, int); 362 - int (*atomic_open)(struct inode *, struct dentry *, 362 + int (*atomic_open)(struct inode *, struct dentry *, struct file *, 363 + unsigned open_flag, umode_t create_mode, int *opened); 363 364 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 364 - } ____cacheline_aligned; 365 - struct file *, unsigned open_flag, 366 - umode_t create_mode, int *opened); 367 365 }; 368 366 369 367 Again, all methods are called without any locks being held, unless ··· 468 470 method the filesystem can look up, possibly create and open the file in 469 471 one atomic operation. If it cannot perform this (e.g. the file type 470 472 turned out to be wrong) it may signal this by returning 1 instead of 471 - usual 0 or -ve . This method is only called if the last 472 - component is negative or needs lookup. Cached positive dentries are 473 - still handled by f_op->open(). 473 + usual 0 or -ve . This method is only called if the last component is 474 + negative or needs lookup. Cached positive dentries are still handled by 475 + f_op->open(). If the file was created, the FILE_CREATED flag should be 476 + set in "opened". In case of O_EXCL the method must only succeed if the 477 + file didn't exist and hence FILE_CREATED shall always be set on success. 474 478 475 479 tmpfile: called in the end of O_TMPFILE open(). Optional, equivalent to 476 480 atomically creating, opening and unlinking a file in given directory.
+6
Documentation/networking/bonding.txt
··· 1362 1362 To remove an ARP target: 1363 1363 # echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target 1364 1364 1365 + To configure the interval between learning packet transmits: 1366 + # echo 12 > /sys/class/net/bond0/bonding/lp_interval 1367 + NOTE: the lp_interval is the number of seconds between instances where 1368 + the bonding driver sends learning packets to each slave's peer switch. The 1369 + default interval is 1 second. 1370 + 1365 1371 Example Configuration 1366 1372 --------------------- 1367 1373 We begin with the same example that is shown in section 3.3,
+1 -3
Documentation/scheduler/sched-design-CFS.txt
··· 66 66 runqueue. 67 67 68 68 CFS maintains a time-ordered rbtree, where all runnable tasks are sorted by the 69 - p->se.vruntime key (there is a subtraction using rq->cfs.min_vruntime to 70 - account for possible wraparounds). CFS picks the "leftmost" task from this 71 - tree and sticks to it. 69 + p->se.vruntime key. CFS picks the "leftmost" task from this tree and sticks to it. 72 70 As the system progresses forwards, the executed tasks are put into the tree 73 71 more and more to the right --- slowly but surely giving a chance for every task 74 72 to become the "leftmost task" and thus get on the CPU within a deterministic
+1 -1
Makefile
··· 1 1 VERSION = 3 2 2 PATCHLEVEL = 12 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc1 4 + EXTRAVERSION = -rc2 5 5 NAME = One Giant Leap for Frogkind 6 6 7 7 # *DOCUMENTATION*
+1
arch/arm/boot/dts/Makefile
··· 183 183 am335x-evm.dtb \ 184 184 am335x-evmsk.dtb \ 185 185 am335x-bone.dtb \ 186 + am335x-boneblack.dtb \ 186 187 am3517-evm.dtb \ 187 188 am3517_mt_ventoux.dtb \ 188 189 am43x-epos-evm.dtb
+262
arch/arm/boot/dts/am335x-bone-common.dtsi
··· 1 + /* 2 + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + / { 10 + model = "TI AM335x BeagleBone"; 11 + compatible = "ti,am335x-bone", "ti,am33xx"; 12 + 13 + cpus { 14 + cpu@0 { 15 + cpu0-supply = <&dcdc2_reg>; 16 + }; 17 + }; 18 + 19 + memory { 20 + device_type = "memory"; 21 + reg = <0x80000000 0x10000000>; /* 256 MB */ 22 + }; 23 + 24 + am33xx_pinmux: pinmux@44e10800 { 25 + pinctrl-names = "default"; 26 + pinctrl-0 = <&clkout2_pin>; 27 + 28 + user_leds_s0: user_leds_s0 { 29 + pinctrl-single,pins = < 30 + 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */ 31 + 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */ 32 + 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */ 33 + 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */ 34 + >; 35 + }; 36 + 37 + i2c0_pins: pinmux_i2c0_pins { 38 + pinctrl-single,pins = < 39 + 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */ 40 + 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */ 41 + >; 42 + }; 43 + 44 + uart0_pins: pinmux_uart0_pins { 45 + pinctrl-single,pins = < 46 + 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */ 47 + 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */ 48 + >; 49 + }; 50 + 51 + clkout2_pin: pinmux_clkout2_pin { 52 + pinctrl-single,pins = < 53 + 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */ 54 + >; 55 + }; 56 + 57 + cpsw_default: cpsw_default { 58 + pinctrl-single,pins = < 59 + /* Slave 1 */ 60 + 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */ 61 + 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */ 62 + 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */ 63 + 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* 
mii1_txd3.mii1_txd3 */ 64 + 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */ 65 + 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */ 66 + 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */ 67 + 0x12c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */ 68 + 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */ 69 + 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */ 70 + 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */ 71 + 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */ 72 + 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */ 73 + >; 74 + }; 75 + 76 + cpsw_sleep: cpsw_sleep { 77 + pinctrl-single,pins = < 78 + /* Slave 1 reset value */ 79 + 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7) 80 + 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7) 81 + 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7) 82 + 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7) 83 + 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7) 84 + 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7) 85 + 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7) 86 + 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7) 87 + 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7) 88 + 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7) 89 + 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7) 90 + 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7) 91 + 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7) 92 + >; 93 + }; 94 + 95 + davinci_mdio_default: davinci_mdio_default { 96 + pinctrl-single,pins = < 97 + /* MDIO */ 98 + 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */ 99 + 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */ 100 + >; 101 + }; 102 + 103 + davinci_mdio_sleep: davinci_mdio_sleep { 104 + pinctrl-single,pins = < 105 + /* MDIO reset value */ 106 + 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7) 107 + 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7) 108 + >; 109 + }; 110 + }; 111 + 112 + ocp { 113 + uart0: serial@44e09000 { 114 + pinctrl-names = "default"; 115 + pinctrl-0 = <&uart0_pins>; 116 + 117 + status = "okay"; 118 + }; 
119 + 120 + musb: usb@47400000 { 121 + status = "okay"; 122 + 123 + control@44e10000 { 124 + status = "okay"; 125 + }; 126 + 127 + usb-phy@47401300 { 128 + status = "okay"; 129 + }; 130 + 131 + usb-phy@47401b00 { 132 + status = "okay"; 133 + }; 134 + 135 + usb@47401000 { 136 + status = "okay"; 137 + }; 138 + 139 + usb@47401800 { 140 + status = "okay"; 141 + dr_mode = "host"; 142 + }; 143 + 144 + dma-controller@07402000 { 145 + status = "okay"; 146 + }; 147 + }; 148 + 149 + i2c0: i2c@44e0b000 { 150 + pinctrl-names = "default"; 151 + pinctrl-0 = <&i2c0_pins>; 152 + 153 + status = "okay"; 154 + clock-frequency = <400000>; 155 + 156 + tps: tps@24 { 157 + reg = <0x24>; 158 + }; 159 + 160 + }; 161 + }; 162 + 163 + leds { 164 + pinctrl-names = "default"; 165 + pinctrl-0 = <&user_leds_s0>; 166 + 167 + compatible = "gpio-leds"; 168 + 169 + led@2 { 170 + label = "beaglebone:green:heartbeat"; 171 + gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>; 172 + linux,default-trigger = "heartbeat"; 173 + default-state = "off"; 174 + }; 175 + 176 + led@3 { 177 + label = "beaglebone:green:mmc0"; 178 + gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>; 179 + linux,default-trigger = "mmc0"; 180 + default-state = "off"; 181 + }; 182 + 183 + led@4 { 184 + label = "beaglebone:green:usr2"; 185 + gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>; 186 + default-state = "off"; 187 + }; 188 + 189 + led@5 { 190 + label = "beaglebone:green:usr3"; 191 + gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>; 192 + default-state = "off"; 193 + }; 194 + }; 195 + }; 196 + 197 + /include/ "tps65217.dtsi" 198 + 199 + &tps { 200 + regulators { 201 + dcdc1_reg: regulator@0 { 202 + regulator-always-on; 203 + }; 204 + 205 + dcdc2_reg: regulator@1 { 206 + /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 207 + regulator-name = "vdd_mpu"; 208 + regulator-min-microvolt = <925000>; 209 + regulator-max-microvolt = <1325000>; 210 + regulator-boot-on; 211 + regulator-always-on; 212 + }; 213 + 214 + dcdc3_reg: regulator@2 { 215 + /* VDD_CORE voltage limits 
0.95V - 1.1V with +/-4% tolerance */ 216 + regulator-name = "vdd_core"; 217 + regulator-min-microvolt = <925000>; 218 + regulator-max-microvolt = <1150000>; 219 + regulator-boot-on; 220 + regulator-always-on; 221 + }; 222 + 223 + ldo1_reg: regulator@3 { 224 + regulator-always-on; 225 + }; 226 + 227 + ldo2_reg: regulator@4 { 228 + regulator-always-on; 229 + }; 230 + 231 + ldo3_reg: regulator@5 { 232 + regulator-always-on; 233 + }; 234 + 235 + ldo4_reg: regulator@6 { 236 + regulator-always-on; 237 + }; 238 + }; 239 + }; 240 + 241 + &cpsw_emac0 { 242 + phy_id = <&davinci_mdio>, <0>; 243 + phy-mode = "mii"; 244 + }; 245 + 246 + &cpsw_emac1 { 247 + phy_id = <&davinci_mdio>, <1>; 248 + phy-mode = "mii"; 249 + }; 250 + 251 + &mac { 252 + pinctrl-names = "default", "sleep"; 253 + pinctrl-0 = <&cpsw_default>; 254 + pinctrl-1 = <&cpsw_sleep>; 255 + 256 + }; 257 + 258 + &davinci_mdio { 259 + pinctrl-names = "default", "sleep"; 260 + pinctrl-0 = <&davinci_mdio_default>; 261 + pinctrl-1 = <&davinci_mdio_sleep>; 262 + };
+1 -255
arch/arm/boot/dts/am335x-bone.dts
··· 8 8 /dts-v1/; 9 9 10 10 #include "am33xx.dtsi" 11 - 12 - / { 13 - model = "TI AM335x BeagleBone"; 14 - compatible = "ti,am335x-bone", "ti,am33xx"; 15 - 16 - cpus { 17 - cpu@0 { 18 - cpu0-supply = <&dcdc2_reg>; 19 - }; 20 - }; 21 - 22 - memory { 23 - device_type = "memory"; 24 - reg = <0x80000000 0x10000000>; /* 256 MB */ 25 - }; 26 - 27 - am33xx_pinmux: pinmux@44e10800 { 28 - pinctrl-names = "default"; 29 - pinctrl-0 = <&clkout2_pin>; 30 - 31 - user_leds_s0: user_leds_s0 { 32 - pinctrl-single,pins = < 33 - 0x54 (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */ 34 - 0x58 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */ 35 - 0x5c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */ 36 - 0x60 (PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */ 37 - >; 38 - }; 39 - 40 - i2c0_pins: pinmux_i2c0_pins { 41 - pinctrl-single,pins = < 42 - 0x188 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */ 43 - 0x18c (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */ 44 - >; 45 - }; 46 - 47 - uart0_pins: pinmux_uart0_pins { 48 - pinctrl-single,pins = < 49 - 0x170 (PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */ 50 - 0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */ 51 - >; 52 - }; 53 - 54 - clkout2_pin: pinmux_clkout2_pin { 55 - pinctrl-single,pins = < 56 - 0x1b4 (PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */ 57 - >; 58 - }; 59 - 60 - cpsw_default: cpsw_default { 61 - pinctrl-single,pins = < 62 - /* Slave 1 */ 63 - 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */ 64 - 0x114 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */ 65 - 0x118 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */ 66 - 0x11c (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */ 67 - 0x120 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */ 68 - 0x124 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */ 69 - 0x128 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */ 70 - 0x12c 
(PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */ 71 - 0x130 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */ 72 - 0x134 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */ 73 - 0x138 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */ 74 - 0x13c (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */ 75 - 0x140 (PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */ 76 - >; 77 - }; 78 - 79 - cpsw_sleep: cpsw_sleep { 80 - pinctrl-single,pins = < 81 - /* Slave 1 reset value */ 82 - 0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7) 83 - 0x114 (PIN_INPUT_PULLDOWN | MUX_MODE7) 84 - 0x118 (PIN_INPUT_PULLDOWN | MUX_MODE7) 85 - 0x11c (PIN_INPUT_PULLDOWN | MUX_MODE7) 86 - 0x120 (PIN_INPUT_PULLDOWN | MUX_MODE7) 87 - 0x124 (PIN_INPUT_PULLDOWN | MUX_MODE7) 88 - 0x128 (PIN_INPUT_PULLDOWN | MUX_MODE7) 89 - 0x12c (PIN_INPUT_PULLDOWN | MUX_MODE7) 90 - 0x130 (PIN_INPUT_PULLDOWN | MUX_MODE7) 91 - 0x134 (PIN_INPUT_PULLDOWN | MUX_MODE7) 92 - 0x138 (PIN_INPUT_PULLDOWN | MUX_MODE7) 93 - 0x13c (PIN_INPUT_PULLDOWN | MUX_MODE7) 94 - 0x140 (PIN_INPUT_PULLDOWN | MUX_MODE7) 95 - >; 96 - }; 97 - 98 - davinci_mdio_default: davinci_mdio_default { 99 - pinctrl-single,pins = < 100 - /* MDIO */ 101 - 0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */ 102 - 0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */ 103 - >; 104 - }; 105 - 106 - davinci_mdio_sleep: davinci_mdio_sleep { 107 - pinctrl-single,pins = < 108 - /* MDIO reset value */ 109 - 0x148 (PIN_INPUT_PULLDOWN | MUX_MODE7) 110 - 0x14c (PIN_INPUT_PULLDOWN | MUX_MODE7) 111 - >; 112 - }; 113 - }; 114 - 115 - ocp { 116 - uart0: serial@44e09000 { 117 - pinctrl-names = "default"; 118 - pinctrl-0 = <&uart0_pins>; 119 - 120 - status = "okay"; 121 - }; 122 - 123 - musb: usb@47400000 { 124 - status = "okay"; 125 - 126 - control@44e10000 { 127 - status = "okay"; 128 - }; 129 - 130 - usb-phy@47401300 { 131 - status = "okay"; 132 - }; 133 - 134 - usb-phy@47401b00 { 135 - status = "okay"; 136 - }; 
137 - 138 - usb@47401000 { 139 - status = "okay"; 140 - }; 141 - 142 - usb@47401800 { 143 - status = "okay"; 144 - dr_mode = "host"; 145 - }; 146 - 147 - dma-controller@07402000 { 148 - status = "okay"; 149 - }; 150 - }; 151 - 152 - i2c0: i2c@44e0b000 { 153 - pinctrl-names = "default"; 154 - pinctrl-0 = <&i2c0_pins>; 155 - 156 - status = "okay"; 157 - clock-frequency = <400000>; 158 - 159 - tps: tps@24 { 160 - reg = <0x24>; 161 - }; 162 - 163 - }; 164 - }; 165 - 166 - leds { 167 - pinctrl-names = "default"; 168 - pinctrl-0 = <&user_leds_s0>; 169 - 170 - compatible = "gpio-leds"; 171 - 172 - led@2 { 173 - label = "beaglebone:green:heartbeat"; 174 - gpios = <&gpio1 21 GPIO_ACTIVE_HIGH>; 175 - linux,default-trigger = "heartbeat"; 176 - default-state = "off"; 177 - }; 178 - 179 - led@3 { 180 - label = "beaglebone:green:mmc0"; 181 - gpios = <&gpio1 22 GPIO_ACTIVE_HIGH>; 182 - linux,default-trigger = "mmc0"; 183 - default-state = "off"; 184 - }; 185 - 186 - led@4 { 187 - label = "beaglebone:green:usr2"; 188 - gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>; 189 - default-state = "off"; 190 - }; 191 - 192 - led@5 { 193 - label = "beaglebone:green:usr3"; 194 - gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>; 195 - default-state = "off"; 196 - }; 197 - }; 198 - }; 199 - 200 - /include/ "tps65217.dtsi" 201 - 202 - &tps { 203 - regulators { 204 - dcdc1_reg: regulator@0 { 205 - regulator-always-on; 206 - }; 207 - 208 - dcdc2_reg: regulator@1 { 209 - /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 210 - regulator-name = "vdd_mpu"; 211 - regulator-min-microvolt = <925000>; 212 - regulator-max-microvolt = <1325000>; 213 - regulator-boot-on; 214 - regulator-always-on; 215 - }; 216 - 217 - dcdc3_reg: regulator@2 { 218 - /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ 219 - regulator-name = "vdd_core"; 220 - regulator-min-microvolt = <925000>; 221 - regulator-max-microvolt = <1150000>; 222 - regulator-boot-on; 223 - regulator-always-on; 224 - }; 225 - 226 - ldo1_reg: 
regulator@3 { 227 - regulator-always-on; 228 - }; 229 - 230 - ldo2_reg: regulator@4 { 231 - regulator-always-on; 232 - }; 233 - 234 - ldo3_reg: regulator@5 { 235 - regulator-always-on; 236 - }; 237 - 238 - ldo4_reg: regulator@6 { 239 - regulator-always-on; 240 - }; 241 - }; 242 - }; 243 - 244 - &cpsw_emac0 { 245 - phy_id = <&davinci_mdio>, <0>; 246 - phy-mode = "mii"; 247 - }; 248 - 249 - &cpsw_emac1 { 250 - phy_id = <&davinci_mdio>, <1>; 251 - phy-mode = "mii"; 252 - }; 253 - 254 - &mac { 255 - pinctrl-names = "default", "sleep"; 256 - pinctrl-0 = <&cpsw_default>; 257 - pinctrl-1 = <&cpsw_sleep>; 258 - 259 - }; 260 - 261 - &davinci_mdio { 262 - pinctrl-names = "default", "sleep"; 263 - pinctrl-0 = <&davinci_mdio_default>; 264 - pinctrl-1 = <&davinci_mdio_sleep>; 265 - }; 11 + #include "am335x-bone-common.dtsi"
+17
arch/arm/boot/dts/am335x-boneblack.dts
··· 1 + /* 2 + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + /dts-v1/; 9 + 10 + #include "am33xx.dtsi" 11 + #include "am335x-bone-common.dtsi" 12 + 13 + &ldo3_reg { 14 + regulator-min-microvolt = <1800000>; 15 + regulator-max-microvolt = <1800000>; 16 + regulator-always-on; 17 + };
+3 -3
arch/arm/boot/dts/imx27.dtsi
··· 187 187 compatible = "fsl,imx27-cspi"; 188 188 reg = <0x1000e000 0x1000>; 189 189 interrupts = <16>; 190 - clocks = <&clks 53>, <&clks 53>; 190 + clocks = <&clks 53>, <&clks 60>; 191 191 clock-names = "ipg", "per"; 192 192 status = "disabled"; 193 193 }; ··· 198 198 compatible = "fsl,imx27-cspi"; 199 199 reg = <0x1000f000 0x1000>; 200 200 interrupts = <15>; 201 - clocks = <&clks 52>, <&clks 52>; 201 + clocks = <&clks 52>, <&clks 60>; 202 202 clock-names = "ipg", "per"; 203 203 status = "disabled"; 204 204 }; ··· 309 309 compatible = "fsl,imx27-cspi"; 310 310 reg = <0x10017000 0x1000>; 311 311 interrupts = <6>; 312 - clocks = <&clks 51>, <&clks 51>; 312 + clocks = <&clks 51>, <&clks 60>; 313 313 clock-names = "ipg", "per"; 314 314 status = "disabled"; 315 315 };
+1 -1
arch/arm/boot/dts/imx51.dtsi
··· 474 474 compatible = "fsl,imx51-pata", "fsl,imx27-pata"; 475 475 reg = <0x83fe0000 0x4000>; 476 476 interrupts = <70>; 477 - clocks = <&clks 161>; 477 + clocks = <&clks 172>; 478 478 status = "disabled"; 479 479 }; 480 480
+2 -2
arch/arm/boot/dts/imx6q-pinfunc.h
··· 207 207 #define MX6QDL_PAD_EIM_D29__ECSPI4_SS0 0x0c8 0x3dc 0x824 0x2 0x1 208 208 #define MX6QDL_PAD_EIM_D29__UART2_RTS_B 0x0c8 0x3dc 0x924 0x4 0x1 209 209 #define MX6QDL_PAD_EIM_D29__UART2_CTS_B 0x0c8 0x3dc 0x000 0x4 0x0 210 - #define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x0c4 0x3dc 0x000 0x4 0x0 211 - #define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B 0x0c4 0x3dc 0x924 0x4 0x1 210 + #define MX6QDL_PAD_EIM_D29__UART2_DTE_RTS_B 0x0c8 0x3dc 0x000 0x4 0x0 211 + #define MX6QDL_PAD_EIM_D29__UART2_DTE_CTS_B 0x0c8 0x3dc 0x924 0x4 0x1 212 212 #define MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x0c8 0x3dc 0x000 0x5 0x0 213 213 #define MX6QDL_PAD_EIM_D29__IPU2_CSI1_VSYNC 0x0c8 0x3dc 0x8e4 0x6 0x0 214 214 #define MX6QDL_PAD_EIM_D29__IPU1_DI0_PIN14 0x0c8 0x3dc 0x000 0x7 0x0
+1 -1
arch/arm/boot/dts/omap3-beagle-xm.dts
··· 11 11 12 12 / { 13 13 model = "TI OMAP3 BeagleBoard xM"; 14 - compatible = "ti,omap3-beagle-xm, ti,omap3-beagle", "ti,omap3"; 14 + compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3"; 15 15 16 16 cpus { 17 17 cpu@0 {
+14
arch/arm/boot/dts/omap3-igep.dtsi
··· 48 48 >; 49 49 }; 50 50 51 + mcbsp2_pins: pinmux_mcbsp2_pins { 52 + pinctrl-single,pins = < 53 + 0x10c (PIN_INPUT | MUX_MODE0) /* mcbsp2_fsx.mcbsp2_fsx */ 54 + 0x10e (PIN_INPUT | MUX_MODE0) /* mcbsp2_clkx.mcbsp2_clkx */ 55 + 0x110 (PIN_INPUT | MUX_MODE0) /* mcbsp2_dr.mcbsp2.dr */ 56 + 0x112 (PIN_OUTPUT | MUX_MODE0) /* mcbsp2_dx.mcbsp2_dx */ 57 + >; 58 + }; 59 + 51 60 mmc1_pins: pinmux_mmc1_pins { 52 61 pinctrl-single,pins = < 53 62 0x114 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc1_clk.sdmmc1_clk */ ··· 100 91 101 92 &i2c2 { 102 93 clock-frequency = <400000>; 94 + }; 95 + 96 + &mcbsp2 { 97 + pinctrl-names = "default"; 98 + pinctrl-0 = <&mcbsp2_pins>; 103 99 }; 104 100 105 101 &mmc1 {
+45 -1
arch/arm/boot/dts/omap4-panda-common.dtsi
··· 107 107 */ 108 108 clock-frequency = <19200000>; 109 109 }; 110 + 111 + /* regulator for wl12xx on sdio5 */ 112 + wl12xx_vmmc: wl12xx_vmmc { 113 + pinctrl-names = "default"; 114 + pinctrl-0 = <&wl12xx_gpio>; 115 + compatible = "regulator-fixed"; 116 + regulator-name = "vwl1271"; 117 + regulator-min-microvolt = <1800000>; 118 + regulator-max-microvolt = <1800000>; 119 + gpio = <&gpio2 11 0>; 120 + startup-delay-us = <70000>; 121 + enable-active-high; 122 + }; 110 123 }; 111 124 112 125 &omap4_pmx_wkup { ··· 248 235 0x1c (PIN_OUTPUT | MUX_MODE3) /* gpio_wk8 */ 249 236 >; 250 237 }; 238 + 239 + /* 240 + * wl12xx GPIO outputs for WLAN_EN, BT_EN, FM_EN, BT_WAKEUP 241 + * REVISIT: Are the pull-ups needed for GPIO 48 and 49? 242 + */ 243 + wl12xx_gpio: pinmux_wl12xx_gpio { 244 + pinctrl-single,pins = < 245 + 0x26 (PIN_OUTPUT | MUX_MODE3) /* gpmc_a19.gpio_43 */ 246 + 0x2c (PIN_OUTPUT | MUX_MODE3) /* gpmc_a22.gpio_46 */ 247 + 0x30 (PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a24.gpio_48 */ 248 + 0x32 (PIN_OUTPUT_PULLUP | MUX_MODE3) /* gpmc_a25.gpio_49 */ 249 + >; 250 + }; 251 + 252 + /* wl12xx GPIO inputs and SDIO pins */ 253 + wl12xx_pins: pinmux_wl12xx_pins { 254 + pinctrl-single,pins = < 255 + 0x38 (PIN_INPUT | MUX_MODE3) /* gpmc_ncs2.gpio_52 */ 256 + 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ 257 + 0x108 (PIN_OUTPUT | MUX_MODE0) /* sdmmc5_clk.sdmmc5_clk */ 258 + 0x10a (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_cmd.sdmmc5_cmd */ 259 + 0x10c (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat0.sdmmc5_dat0 */ 260 + 0x10e (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat1.sdmmc5_dat1 */ 261 + 0x110 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat2.sdmmc5_dat2 */ 262 + 0x112 (PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */ 263 + >; 264 + }; 251 265 }; 252 266 253 267 &i2c1 { ··· 354 314 }; 355 315 356 316 &mmc5 { 357 - ti,non-removable; 317 + pinctrl-names = "default"; 318 + pinctrl-0 = <&wl12xx_pins>; 319 + vmmc-supply = <&wl12xx_vmmc>; 320 + non-removable; 358 321 
bus-width = <4>; 322 + cap-power-off-card; 359 323 }; 360 324 361 325 &emif1 {
+38 -1
arch/arm/boot/dts/omap4-sdp.dts
··· 140 140 "DMic", "Digital Mic", 141 141 "Digital Mic", "Digital Mic1 Bias"; 142 142 }; 143 + 144 + /* regulator for wl12xx on sdio5 */ 145 + wl12xx_vmmc: wl12xx_vmmc { 146 + pinctrl-names = "default"; 147 + pinctrl-0 = <&wl12xx_gpio>; 148 + compatible = "regulator-fixed"; 149 + regulator-name = "vwl1271"; 150 + regulator-min-microvolt = <1800000>; 151 + regulator-max-microvolt = <1800000>; 152 + gpio = <&gpio2 22 0>; 153 + startup-delay-us = <70000>; 154 + enable-active-high; 155 + }; 143 156 }; 144 157 145 158 &omap4_pmx_wkup { ··· 308 295 0xf0 (PIN_INPUT_PULLUP | MUX_MODE0) /* i2c4_sda */ 309 296 >; 310 297 }; 298 + 299 + /* wl12xx GPIO output for WLAN_EN */ 300 + wl12xx_gpio: pinmux_wl12xx_gpio { 301 + pinctrl-single,pins = < 302 + 0x3c (PIN_OUTPUT | MUX_MODE3) /* gpmc_nwp.gpio_54 */ 303 + >; 304 + }; 305 + 306 + /* wl12xx GPIO inputs and SDIO pins */ 307 + wl12xx_pins: pinmux_wl12xx_pins { 308 + pinctrl-single,pins = < 309 + 0x3a (PIN_INPUT | MUX_MODE3) /* gpmc_ncs3.gpio_53 */ 310 + 0x108 (PIN_OUTPUT | MUX_MODE3) /* sdmmc5_clk.sdmmc5_clk */ 311 + 0x10a (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_cmd.sdmmc5_cmd */ 312 + 0x10c (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat0.sdmmc5_dat0 */ 313 + 0x10e (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat1.sdmmc5_dat1 */ 314 + 0x110 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat2.sdmmc5_dat2 */ 315 + 0x112 (PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc5_dat3.sdmmc5_dat3 */ 316 + >; 317 + }; 311 318 }; 312 319 313 320 &i2c1 { ··· 453 420 }; 454 421 455 422 &mmc5 { 423 + pinctrl-names = "default"; 424 + pinctrl-0 = <&wl12xx_pins>; 425 + vmmc-supply = <&wl12xx_vmmc>; 426 + non-removable; 456 427 bus-width = <4>; 457 - ti,non-removable; 428 + cap-power-off-card; 458 429 }; 459 430 460 431 &emif1 {
+4 -3
arch/arm/boot/dts/omap5.dtsi
··· 637 637 omap_dwc3@4a020000 { 638 638 compatible = "ti,dwc3"; 639 639 ti,hwmods = "usb_otg_ss"; 640 - reg = <0x4a020000 0x1000>; 640 + reg = <0x4a020000 0x10000>; 641 641 interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>; 642 642 #address-cells = <1>; 643 643 #size-cells = <1>; ··· 645 645 ranges; 646 646 dwc3@4a030000 { 647 647 compatible = "snps,dwc3"; 648 - reg = <0x4a030000 0x1000>; 648 + reg = <0x4a030000 0x10000>; 649 649 interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>; 650 650 usb-phy = <&usb2_phy>, <&usb3_phy>; 651 651 tx-fifo-resize; 652 652 }; 653 653 }; 654 654 655 - ocp2scp { 655 + ocp2scp@4a080000 { 656 656 compatible = "ti,omap-ocp2scp"; 657 657 #address-cells = <1>; 658 658 #size-cells = <1>; 659 + reg = <0x4a080000 0x20>; 659 660 ranges; 660 661 ti,hwmods = "ocp2scp1"; 661 662 usb2_phy: usb2phy@4a084000 {
+2
arch/arm/configs/multi_v7_defconfig
··· 36 36 CONFIG_TEGRA_PCI=y 37 37 CONFIG_TEGRA_EMC_SCALING_ENABLE=y 38 38 CONFIG_ARCH_U8500=y 39 + CONFIG_MACH_HREFV60=y 39 40 CONFIG_MACH_SNOWBALL=y 40 41 CONFIG_MACH_UX500_DT=y 41 42 CONFIG_ARCH_VEXPRESS=y ··· 47 46 CONFIG_SMP=y 48 47 CONFIG_HIGHPTE=y 49 48 CONFIG_ARM_APPENDED_DTB=y 49 + CONFIG_ARM_ATAG_DTB_COMPAT=y 50 50 CONFIG_NET=y 51 51 CONFIG_UNIX=y 52 52 CONFIG_INET=y
+1
arch/arm/mach-imx/clk-fixup-mux.c
··· 90 90 init.ops = &clk_fixup_mux_ops; 91 91 init.parent_names = parents; 92 92 init.num_parents = num_parents; 93 + init.flags = 0; 93 94 94 95 fixup_mux->mux.reg = reg; 95 96 fixup_mux->mux.shift = shift;
+1 -1
arch/arm/mach-imx/clk-imx27.c
··· 285 285 clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL); 286 286 clk_register_clkdev(clk[rtc_ipg_gate], NULL, "imx21-rtc"); 287 287 clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL); 288 - clk_register_clkdev(clk[cpu_div], NULL, "cpufreq-cpu0.0"); 288 + clk_register_clkdev(clk[cpu_div], NULL, "cpu0"); 289 289 clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL); 290 290 291 291 mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
+2 -2
arch/arm/mach-imx/clk-imx51-imx53.c
··· 328 328 clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1"); 329 329 clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2"); 330 330 clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma"); 331 - clk_register_clkdev(clk[cpu_podf], NULL, "cpufreq-cpu0.0"); 331 + clk_register_clkdev(clk[cpu_podf], NULL, "cpu0"); 332 332 clk_register_clkdev(clk[iim_gate], "iim", NULL); 333 333 clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0"); 334 334 clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1"); ··· 397 397 mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel)); 398 398 clk[spdif1_sel] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2, 399 399 spdif_sel, ARRAY_SIZE(spdif_sel)); 400 - clk[spdif1_pred] = imx_clk_divider("spdif1_podf", "spdif1_sel", MXC_CCM_CDCDR, 16, 3); 400 + clk[spdif1_pred] = imx_clk_divider("spdif1_pred", "spdif1_sel", MXC_CCM_CDCDR, 16, 3); 401 401 clk[spdif1_podf] = imx_clk_divider("spdif1_podf", "spdif1_pred", MXC_CCM_CDCDR, 9, 6); 402 402 clk[spdif1_com_sel] = imx_clk_mux("spdif1_com_sel", MXC_CCM_CSCMR2, 5, 1, 403 403 mx51_spdif1_com_sel, ARRAY_SIZE(mx51_spdif1_com_sel));
+7 -2
arch/arm/mach-imx/mach-imx6q.c
··· 233 233 of_node_put(np); 234 234 } 235 235 236 - static void __init imx6q_opp_init(struct device *cpu_dev) 236 + static void __init imx6q_opp_init(void) 237 237 { 238 238 struct device_node *np; 239 + struct device *cpu_dev = get_cpu_device(0); 239 240 241 + if (!cpu_dev) { 242 + pr_warn("failed to get cpu0 device\n"); 243 + return; 244 + } 240 245 np = of_node_get(cpu_dev->of_node); 241 246 if (!np) { 242 247 pr_warn("failed to find cpu0 node\n"); ··· 273 268 imx6q_cpuidle_init(); 274 269 275 270 if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) { 276 - imx6q_opp_init(&imx6q_cpufreq_pdev.dev); 271 + imx6q_opp_init(); 277 272 platform_device_register(&imx6q_cpufreq_pdev); 278 273 } 279 274 }
+11
arch/arm/mach-imx/system.c
··· 117 117 /* Configure the L2 PREFETCH and POWER registers */ 118 118 val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); 119 119 val |= 0x70800000; 120 + /* 121 + * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0 122 + * The L2 cache controller(PL310) version on the i.MX6DL/SOLO/SL is r3p2 123 + * But according to ARM PL310 errata: 752271 124 + * ID: 752271: Double linefill feature can cause data corruption 125 + * Fault Status: Present in: r3p0, r3p1, r3p1-50rel0. Fixed in r3p2 126 + * Workaround: The only workaround to this erratum is to disable the 127 + * double linefill feature. This is the default behavior. 128 + */ 129 + if (cpu_is_imx6q()) 130 + val &= ~(1 << 30 | 1 << 23); 120 131 writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL); 121 132 val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN; 122 133 writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
+1 -1
arch/arm/mach-omap2/cclock44xx_data.c
··· 1632 1632 CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck), 1633 1633 CLK(NULL, "auxclk5_ck", &auxclk5_ck), 1634 1634 CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck), 1635 - CLK("omap-gpmc", "fck", &dummy_ck), 1635 + CLK("50000000.gpmc", "fck", &dummy_ck), 1636 1636 CLK("omap_i2c.1", "ick", &dummy_ck), 1637 1637 CLK("omap_i2c.2", "ick", &dummy_ck), 1638 1638 CLK("omap_i2c.3", "ick", &dummy_ck),
+1 -1
arch/arm/mach-omap2/cpuidle44xx.c
··· 143 143 * Call idle CPU cluster PM exit notifier chain 144 144 * to restore GIC and wakeupgen context. 145 145 */ 146 - if ((cx->mpu_state == PWRDM_POWER_RET) && 146 + if (dev->cpu == 0 && (cx->mpu_state == PWRDM_POWER_RET) && 147 147 (cx->mpu_logic_state == PWRDM_POWER_OFF)) 148 148 cpu_cluster_pm_exit(); 149 149
+2 -2
arch/arm/mach-omap2/gpmc.c
··· 1491 1491 */ 1492 1492 ret = gpmc_cs_remap(cs, res.start); 1493 1493 if (ret < 0) { 1494 - dev_err(&pdev->dev, "cannot remap GPMC CS %d to 0x%x\n", 1495 - cs, res.start); 1494 + dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n", 1495 + cs, &res.start); 1496 1496 goto err; 1497 1497 } 1498 1498
+1 -1
arch/arm/mach-omap2/mux34xx.c
··· 620 620 "uart1_rts", "ssi1_flag_tx", NULL, NULL, 621 621 "gpio_149", NULL, NULL, "safe_mode"), 622 622 _OMAP3_MUXENTRY(UART1_RX, 151, 623 - "uart1_rx", "ss1_wake_tx", "mcbsp1_clkr", "mcspi4_clk", 623 + "uart1_rx", "ssi1_wake_tx", "mcbsp1_clkr", "mcspi4_clk", 624 624 "gpio_151", NULL, NULL, "safe_mode"), 625 625 _OMAP3_MUXENTRY(UART1_TX, 148, 626 626 "uart1_tx", "ssi1_dat_tx", NULL, NULL,
+1 -1
arch/arm/mach-omap2/omap-smp.c
··· 1 1 /* 2 - * OMAP4 SMP source file. It contains platform specific fucntions 2 + * OMAP4 SMP source file. It contains platform specific functions 3 3 * needed for the linux smp kernel. 4 4 * 5 5 * Copyright (C) 2009 Texas Instruments, Inc.
+1 -1
arch/arm/mach-omap2/omap_device.c
··· 158 158 } 159 159 160 160 od = omap_device_alloc(pdev, hwmods, oh_cnt); 161 - if (!od) { 161 + if (IS_ERR(od)) { 162 162 dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n", 163 163 oh_name); 164 164 ret = PTR_ERR(od);
+1 -1
arch/arm/mach-sa1100/collie.c
··· 289 289 } 290 290 291 291 static struct flash_platform_data collie_flash_data = { 292 - .map_name = "cfi_probe", 292 + .map_name = "jedec_probe", 293 293 .init = collie_flash_init, 294 294 .set_vpp = collie_set_vpp, 295 295 .exit = collie_flash_exit,
+1 -1
arch/arm/mach-shmobile/clock-r8a73a4.c
··· 555 555 CLKDEV_CON_ID("pll2h", &pll2h_clk), 556 556 557 557 /* CPU clock */ 558 - CLKDEV_DEV_ID("cpufreq-cpu0", &z_clk), 558 + CLKDEV_DEV_ID("cpu0", &z_clk), 559 559 560 560 /* DIV6 */ 561 561 CLKDEV_CON_ID("zb", &div6_clks[DIV6_ZB]),
+1 -1
arch/arm/mach-shmobile/clock-sh73a0.c
··· 616 616 CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */ 617 617 618 618 /* DIV4 clocks */ 619 - CLKDEV_DEV_ID("cpufreq-cpu0", &div4_clks[DIV4_Z]), 619 + CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]), 620 620 621 621 /* DIV6 clocks */ 622 622 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
+5 -5
arch/arm/mach-u300/Kconfig
··· 1 - menu "ST-Ericsson AB U300/U335 Platform" 2 - 3 - comment "ST-Ericsson Mobile Platform Products" 4 - 5 1 config ARCH_U300 6 2 bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5 7 3 depends on MMU ··· 21 25 help 22 26 Support for ST-Ericsson U300 series mobile platforms. 23 27 24 - comment "ST-Ericsson U300/U335 Feature Selections" 28 + if ARCH_U300 29 + 30 + menu "ST-Ericsson AB U300/U335 Platform" 25 31 26 32 config MACH_U300 27 33 depends on ARCH_U300 ··· 51 53 SPI framework and ARM PL022 support. 52 54 53 55 endmenu 56 + 57 + endif
+1
arch/arm/mach-ux500/cache-l2x0.c
··· 69 69 * some SMI service available. 70 70 */ 71 71 outer_cache.disable = NULL; 72 + outer_cache.set_debug = NULL; 72 73 73 74 return 0; 74 75 }
+1 -1
arch/arm64/include/asm/hwcap.h
··· 43 43 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ 44 44 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) 45 45 46 - extern unsigned int elf_hwcap; 46 + extern unsigned long elf_hwcap; 47 47 #endif 48 48 #endif
+16 -5
arch/arm64/kernel/process.c
··· 143 143 144 144 void __show_regs(struct pt_regs *regs) 145 145 { 146 - int i; 146 + int i, top_reg; 147 + u64 lr, sp; 148 + 149 + if (compat_user_mode(regs)) { 150 + lr = regs->compat_lr; 151 + sp = regs->compat_sp; 152 + top_reg = 12; 153 + } else { 154 + lr = regs->regs[30]; 155 + sp = regs->sp; 156 + top_reg = 29; 157 + } 147 158 148 159 show_regs_print_info(KERN_DEFAULT); 149 160 print_symbol("PC is at %s\n", instruction_pointer(regs)); 150 - print_symbol("LR is at %s\n", regs->regs[30]); 161 + print_symbol("LR is at %s\n", lr); 151 162 printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n", 152 - regs->pc, regs->regs[30], regs->pstate); 153 - printk("sp : %016llx\n", regs->sp); 154 - for (i = 29; i >= 0; i--) { 163 + regs->pc, lr, regs->pstate); 164 + printk("sp : %016llx\n", sp); 165 + for (i = top_reg; i >= 0; i--) { 155 166 printk("x%-2d: %016llx ", i, regs->regs[i]); 156 167 if (i % 2 == 0) 157 168 printk("\n");
+1 -1
arch/arm64/kernel/setup.c
··· 57 57 unsigned int processor_id; 58 58 EXPORT_SYMBOL(processor_id); 59 59 60 - unsigned int elf_hwcap __read_mostly; 60 + unsigned long elf_hwcap __read_mostly; 61 61 EXPORT_SYMBOL_GPL(elf_hwcap); 62 62 63 63 static const char *cpu_name;
+1 -1
arch/arm64/mm/fault.c
··· 130 130 force_sig_info(sig, &si, tsk); 131 131 } 132 132 133 - void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) 133 + static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) 134 134 { 135 135 struct task_struct *tsk = current; 136 136 struct mm_struct *mm = tsk->active_mm;
-3
arch/mips/Makefile
··· 288 288 vmlinux.32: vmlinux 289 289 $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@ 290 290 291 - 292 - #obj-$(CONFIG_KPROBES) += kprobes.o 293 - 294 291 # 295 292 # The 64-bit ELF tools are pretty broken so at this time we generate 64-bit 296 293 # ELF files from 32-bit files by conversion.
+2 -1
arch/mips/alchemy/common/usb.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/spinlock.h> 16 16 #include <linux/syscore_ops.h> 17 + #include <asm/cpu.h> 17 18 #include <asm/mach-au1x00/au1000.h> 18 19 19 20 /* control register offsets */ ··· 359 358 { 360 359 #if defined(CONFIG_DMA_COHERENT) 361 360 /* Au1200 AB USB does not support coherent memory */ 362 - if (!(read_c0_prid() & 0xff)) { 361 + if (!(read_c0_prid() & PRID_REV_MASK)) { 363 362 printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n"); 364 363 printk(KERN_INFO "Au1200 USB: update your board or re-configure" 365 364 " the kernel\n");
+2 -2
arch/mips/bcm63xx/cpu.c
··· 306 306 307 307 switch (c->cputype) { 308 308 case CPU_BMIPS3300: 309 - if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT) 309 + if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT) 310 310 __cpu_name[cpu] = "Broadcom BCM6338"; 311 311 /* fall-through */ 312 312 case CPU_BMIPS32: 313 313 chipid_reg = BCM_6345_PERF_BASE; 314 314 break; 315 315 case CPU_BMIPS4350: 316 - switch ((read_c0_prid() & 0xff)) { 316 + switch ((read_c0_prid() & PRID_REV_MASK)) { 317 317 case 0x04: 318 318 chipid_reg = BCM_3368_PERF_BASE; 319 319 break;
+1
arch/mips/cavium-octeon/csrc-octeon.c
··· 12 12 #include <linux/smp.h> 13 13 14 14 #include <asm/cpu-info.h> 15 + #include <asm/cpu-type.h> 15 16 #include <asm/time.h> 16 17 17 18 #include <asm/octeon/octeon.h>
+1
arch/mips/dec/prom/init.c
··· 13 13 14 14 #include <asm/bootinfo.h> 15 15 #include <asm/cpu.h> 16 + #include <asm/cpu-type.h> 16 17 #include <asm/processor.h> 17 18 18 19 #include <asm/dec/prom.h>
-6
arch/mips/include/asm/cpu-features.h
··· 13 13 #include <asm/cpu-info.h> 14 14 #include <cpu-feature-overrides.h> 15 15 16 - #ifndef current_cpu_type 17 - #define current_cpu_type() current_cpu_data.cputype 18 - #endif 19 - 20 - #define boot_cpu_type() cpu_data[0].cputype 21 - 22 16 /* 23 17 * SMP assumption: Options of CPU 0 are a superset of all processors. 24 18 * This is true for all known MIPS systems.
+1
arch/mips/include/asm/cpu-info.h
··· 84 84 extern struct cpuinfo_mips cpu_data[]; 85 85 #define current_cpu_data cpu_data[smp_processor_id()] 86 86 #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] 87 + #define boot_cpu_data cpu_data[0] 87 88 88 89 extern void cpu_probe(void); 89 90 extern void cpu_report(void);
+203
arch/mips/include/asm/cpu-type.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2003, 2004 Ralf Baechle 7 + * Copyright (C) 2004 Maciej W. Rozycki 8 + */ 9 + #ifndef __ASM_CPU_TYPE_H 10 + #define __ASM_CPU_TYPE_H 11 + 12 + #include <linux/smp.h> 13 + #include <linux/compiler.h> 14 + 15 + static inline int __pure __get_cpu_type(const int cpu_type) 16 + { 17 + switch (cpu_type) { 18 + #if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \ 19 + defined(CONFIG_SYS_HAS_CPU_LOONGSON2F) 20 + case CPU_LOONGSON2: 21 + #endif 22 + 23 + #ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B 24 + case CPU_LOONGSON1: 25 + #endif 26 + 27 + #ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1 28 + case CPU_4KC: 29 + case CPU_ALCHEMY: 30 + case CPU_BMIPS3300: 31 + case CPU_BMIPS4350: 32 + case CPU_PR4450: 33 + case CPU_BMIPS32: 34 + case CPU_JZRISC: 35 + #endif 36 + 37 + #if defined(CONFIG_SYS_HAS_CPU_MIPS32_R1) || \ 38 + defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) 39 + case CPU_4KEC: 40 + #endif 41 + 42 + #ifdef CONFIG_SYS_HAS_CPU_MIPS32_R2 43 + case CPU_4KSC: 44 + case CPU_24K: 45 + case CPU_34K: 46 + case CPU_1004K: 47 + case CPU_74K: 48 + case CPU_M14KC: 49 + case CPU_M14KEC: 50 + #endif 51 + 52 + #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 53 + case CPU_5KC: 54 + case CPU_5KE: 55 + case CPU_20KC: 56 + case CPU_25KF: 57 + case CPU_SB1: 58 + case CPU_SB1A: 59 + #endif 60 + 61 + #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R2 62 + /* 63 + * All MIPS64 R2 processors have their own special symbols. 
That is, 64 + * there currently is no pure R2 core 65 + */ 66 + #endif 67 + 68 + #ifdef CONFIG_SYS_HAS_CPU_R3000 69 + case CPU_R2000: 70 + case CPU_R3000: 71 + case CPU_R3000A: 72 + case CPU_R3041: 73 + case CPU_R3051: 74 + case CPU_R3052: 75 + case CPU_R3081: 76 + case CPU_R3081E: 77 + #endif 78 + 79 + #ifdef CONFIG_SYS_HAS_CPU_TX39XX 80 + case CPU_TX3912: 81 + case CPU_TX3922: 82 + case CPU_TX3927: 83 + #endif 84 + 85 + #ifdef CONFIG_SYS_HAS_CPU_VR41XX 86 + case CPU_VR41XX: 87 + case CPU_VR4111: 88 + case CPU_VR4121: 89 + case CPU_VR4122: 90 + case CPU_VR4131: 91 + case CPU_VR4133: 92 + case CPU_VR4181: 93 + case CPU_VR4181A: 94 + #endif 95 + 96 + #ifdef CONFIG_SYS_HAS_CPU_R4300 97 + case CPU_R4300: 98 + case CPU_R4310: 99 + #endif 100 + 101 + #ifdef CONFIG_SYS_HAS_CPU_R4X00 102 + case CPU_R4000PC: 103 + case CPU_R4000SC: 104 + case CPU_R4000MC: 105 + case CPU_R4200: 106 + case CPU_R4400PC: 107 + case CPU_R4400SC: 108 + case CPU_R4400MC: 109 + case CPU_R4600: 110 + case CPU_R4700: 111 + case CPU_R4640: 112 + case CPU_R4650: 113 + #endif 114 + 115 + #ifdef CONFIG_SYS_HAS_CPU_TX49XX 116 + case CPU_TX49XX: 117 + #endif 118 + 119 + #ifdef CONFIG_SYS_HAS_CPU_R5000 120 + case CPU_R5000: 121 + #endif 122 + 123 + #ifdef CONFIG_SYS_HAS_CPU_R5432 124 + case CPU_R5432: 125 + #endif 126 + 127 + #ifdef CONFIG_SYS_HAS_CPU_R5500 128 + case CPU_R5500: 129 + #endif 130 + 131 + #ifdef CONFIG_SYS_HAS_CPU_R6000 132 + case CPU_R6000: 133 + case CPU_R6000A: 134 + #endif 135 + 136 + #ifdef CONFIG_SYS_HAS_CPU_NEVADA 137 + case CPU_NEVADA: 138 + #endif 139 + 140 + #ifdef CONFIG_SYS_HAS_CPU_R8000 141 + case CPU_R8000: 142 + #endif 143 + 144 + #ifdef CONFIG_SYS_HAS_CPU_R10000 145 + case CPU_R10000: 146 + case CPU_R12000: 147 + case CPU_R14000: 148 + #endif 149 + #ifdef CONFIG_SYS_HAS_CPU_RM7000 150 + case CPU_RM7000: 151 + case CPU_SR71000: 152 + #endif 153 + #ifdef CONFIG_SYS_HAS_CPU_RM9000 154 + case CPU_RM9000: 155 + #endif 156 + #ifdef CONFIG_SYS_HAS_CPU_SB1 157 + case CPU_SB1: 158 + 
case CPU_SB1A: 159 + #endif 160 + #ifdef CONFIG_SYS_HAS_CPU_CAVIUM_OCTEON 161 + case CPU_CAVIUM_OCTEON: 162 + case CPU_CAVIUM_OCTEON_PLUS: 163 + case CPU_CAVIUM_OCTEON2: 164 + #endif 165 + 166 + #ifdef CONFIG_SYS_HAS_CPU_BMIPS4380 167 + case CPU_BMIPS4380: 168 + #endif 169 + 170 + #ifdef CONFIG_SYS_HAS_CPU_BMIPS5000 171 + case CPU_BMIPS5000: 172 + #endif 173 + 174 + #ifdef CONFIG_SYS_HAS_CPU_XLP 175 + case CPU_XLP: 176 + #endif 177 + 178 + #ifdef CONFIG_SYS_HAS_CPU_XLR 179 + case CPU_XLR: 180 + #endif 181 + break; 182 + default: 183 + unreachable(); 184 + } 185 + 186 + return cpu_type; 187 + } 188 + 189 + static inline int __pure current_cpu_type(void) 190 + { 191 + const int cpu_type = current_cpu_data.cputype; 192 + 193 + return __get_cpu_type(cpu_type); 194 + } 195 + 196 + static inline int __pure boot_cpu_type(void) 197 + { 198 + const int cpu_type = cpu_data[0].cputype; 199 + 200 + return __get_cpu_type(cpu_type); 201 + } 202 + 203 + #endif /* __ASM_CPU_TYPE_H */
+29 -9
arch/mips/include/asm/cpu.h
··· 3 3 * various MIPS cpu types. 4 4 * 5 5 * Copyright (C) 1996 David S. Miller (davem@davemloft.net) 6 - * Copyright (C) 2004 Maciej W. Rozycki 6 + * Copyright (C) 2004, 2013 Maciej W. Rozycki 7 7 */ 8 8 #ifndef _ASM_CPU_H 9 9 #define _ASM_CPU_H 10 10 11 - /* Assigned Company values for bits 23:16 of the PRId Register 12 - (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from 13 - MTI, the PRId register is defined in this (backwards compatible) 14 - way: 11 + /* 12 + As of the MIPS32 and MIPS64 specs from MTI, the PRId register (CP0 13 + register 15, select 0) is defined in this (backwards compatible) way: 15 14 16 15 +----------------+----------------+----------------+----------------+ 17 16 | Company Options| Company ID | Processor ID | Revision | ··· 21 22 that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64 22 23 spec. 23 24 */ 25 + 26 + #define PRID_OPT_MASK 0xff000000 27 + 28 + /* 29 + * Assigned Company values for bits 23:16 of the PRId register. 30 + */ 31 + 32 + #define PRID_COMP_MASK 0xff0000 24 33 25 34 #define PRID_COMP_LEGACY 0x000000 26 35 #define PRID_COMP_MIPS 0x010000 ··· 45 38 #define PRID_COMP_INGENIC 0xd00000 46 39 47 40 /* 48 - * Assigned values for the product ID register. In order to detect a 49 - * certain CPU type exactly eventually additional registers may need to 50 - * be examined. These are valid when 23:16 == PRID_COMP_LEGACY 41 + * Assigned Processor ID (implementation) values for bits 15:8 of the PRId 42 + * register. In order to detect a certain CPU type exactly eventually 43 + * additional registers may need to be examined. 
51 44 */ 45 + 46 + #define PRID_IMP_MASK 0xff00 47 + 48 + /* 49 + * These are valid when 23:16 == PRID_COMP_LEGACY 50 + */ 51 + 52 52 #define PRID_IMP_R2000 0x0100 53 53 #define PRID_IMP_AU1_REV1 0x0100 54 54 #define PRID_IMP_AU1_REV2 0x0200 ··· 196 182 #define PRID_IMP_NETLOGIC_XLP2XX 0x1200 197 183 198 184 /* 199 - * Definitions for 7:0 on legacy processors 185 + * Particular Revision values for bits 7:0 of the PRId register. 200 186 */ 201 187 202 188 #define PRID_REV_MASK 0x00ff 189 + 190 + /* 191 + * Definitions for 7:0 on legacy processors 192 + */ 203 193 204 194 #define PRID_REV_TX4927 0x0022 205 195 #define PRID_REV_TX4937 0x0030 ··· 244 226 * +---------------------------------+----------------+----------------+ 245 227 * 31 16 15 8 7 0 246 228 */ 229 + 230 + #define FPIR_IMP_MASK 0xff00 247 231 248 232 #define FPIR_IMP_NONE 0x0000 249 233
+3 -1
arch/mips/include/asm/mach-au1x00/au1000.h
··· 43 43 #include <linux/io.h> 44 44 #include <linux/irq.h> 45 45 46 + #include <asm/cpu.h> 47 + 46 48 /* cpu pipeline flush */ 47 49 void static inline au_sync(void) 48 50 { ··· 142 140 143 141 static inline int alchemy_get_cputype(void) 144 142 { 145 - switch (read_c0_prid() & 0xffff0000) { 143 + switch (read_c0_prid() & (PRID_OPT_MASK | PRID_COMP_MASK)) { 146 144 case 0x00030000: 147 145 return ALCHEMY_CPU_AU1000; 148 146 break;
+2
arch/mips/include/asm/mach-ip22/cpu-feature-overrides.h
··· 8 8 #ifndef __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H 9 9 #define __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H 10 10 11 + #include <asm/cpu.h> 12 + 11 13 /* 12 14 * IP22 with a variety of processors so we can't use defaults for everything. 13 15 */
+2
arch/mips/include/asm/mach-ip27/cpu-feature-overrides.h
··· 8 8 #ifndef __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H 9 9 #define __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H 10 10 11 + #include <asm/cpu.h> 12 + 11 13 /* 12 14 * IP27 only comes with R10000 family processors all using the same config 13 15 */
+2
arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
··· 9 9 #ifndef __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H 10 10 #define __ASM_MACH_IP28_CPU_FEATURE_OVERRIDES_H 11 11 12 + #include <asm/cpu.h> 13 + 12 14 /* 13 15 * IP28 only comes with R10000 family processors all using the same config 14 16 */
+7
arch/mips/include/asm/mipsregs.h
··· 603 603 #define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14) 604 604 #define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14) 605 605 606 + #define MIPS_CONF5_NF (_ULCAST_(1) << 0) 607 + #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) 608 + #define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) 609 + #define MIPS_CONF5_EVA (_ULCAST_(1) << 28) 610 + #define MIPS_CONF5_CV (_ULCAST_(1) << 29) 611 + #define MIPS_CONF5_K (_ULCAST_(1) << 30) 612 + 606 613 #define MIPS_CONF6_SYND (_ULCAST_(1) << 13) 607 614 608 615 #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
+12
arch/mips/include/asm/pci.h
··· 83 83 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 84 84 enum pci_mmap_state mmap_state, int write_combine); 85 85 86 + #define HAVE_ARCH_PCI_RESOURCE_TO_USER 87 + 88 + static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, 89 + const struct resource *rsrc, resource_size_t *start, 90 + resource_size_t *end) 91 + { 92 + phys_t size = resource_size(rsrc); 93 + 94 + *start = fixup_bigphys_addr(rsrc->start, size); 95 + *end = rsrc->start + size; 96 + } 97 + 86 98 /* 87 99 * Dynamic DMA mapping stuff. 88 100 * MIPS has everything mapped statically.
+32 -1
arch/mips/include/asm/timex.h
··· 10 10 11 11 #ifdef __KERNEL__ 12 12 13 + #include <asm/cpu-features.h> 13 14 #include <asm/mipsregs.h> 15 + #include <asm/cpu-type.h> 14 16 15 17 /* 16 18 * This is the clock rate of the i8253 PIT. A MIPS system may not have ··· 35 33 36 34 typedef unsigned int cycles_t; 37 35 36 + /* 37 + * On R4000/R4400 before version 5.0 an erratum exists such that if the 38 + * cycle counter is read in the exact moment that it is matching the 39 + * compare register, no interrupt will be generated. 40 + * 41 + * There is a suggested workaround and also the erratum can't strike if 42 + * the compare interrupt isn't being used as the clock source device. 43 + * However for now the implementaton of this function doesn't get these 44 + * fine details right. 45 + */ 38 46 static inline cycles_t get_cycles(void) 39 47 { 40 - return 0; 48 + switch (boot_cpu_type()) { 49 + case CPU_R4400PC: 50 + case CPU_R4400SC: 51 + case CPU_R4400MC: 52 + if ((read_c0_prid() & 0xff) >= 0x0050) 53 + return read_c0_count(); 54 + break; 55 + 56 + case CPU_R4000PC: 57 + case CPU_R4000SC: 58 + case CPU_R4000MC: 59 + break; 60 + 61 + default: 62 + if (cpu_has_counter) 63 + return read_c0_count(); 64 + break; 65 + } 66 + 67 + return 0; /* no usable counter */ 41 68 } 42 69 43 70 #endif /* __KERNEL__ */
+2 -1
arch/mips/include/asm/vga.h
··· 6 6 #ifndef _ASM_VGA_H 7 7 #define _ASM_VGA_H 8 8 9 + #include <asm/addrspace.h> 9 10 #include <asm/byteorder.h> 10 11 11 12 /* ··· 14 13 * access the videoram directly without any black magic. 15 14 */ 16 15 17 - #define VGA_MAP_MEM(x, s) (0xb0000000L + (unsigned long)(x)) 16 + #define VGA_MAP_MEM(x, s) CKSEG1ADDR(0x10000000L + (unsigned long)(x)) 18 17 19 18 #define vga_readb(x) (*(x)) 20 19 #define vga_writeb(x, y) (*(y) = (x))
+37 -21
arch/mips/kernel/cpu-probe.c
··· 20 20 21 21 #include <asm/bugs.h> 22 22 #include <asm/cpu.h> 23 + #include <asm/cpu-type.h> 23 24 #include <asm/fpu.h> 24 25 #include <asm/mipsregs.h> 25 26 #include <asm/watch.h> ··· 56 55 { 57 56 struct cpuinfo_mips *c = &current_cpu_data; 58 57 59 - switch (c->cputype) { 58 + switch (current_cpu_type()) { 60 59 case CPU_34K: 61 60 /* 62 61 * Erratum "RPS May Cause Incorrect Instruction Execution" ··· 123 122 */ 124 123 static inline int __cpu_has_fpu(void) 125 124 { 126 - return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE); 125 + return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE); 127 126 } 128 127 129 128 static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) ··· 291 290 return config4 & MIPS_CONF_M; 292 291 } 293 292 293 + static inline unsigned int decode_config5(struct cpuinfo_mips *c) 294 + { 295 + unsigned int config5; 296 + 297 + config5 = read_c0_config5(); 298 + config5 &= ~MIPS_CONF5_UFR; 299 + write_c0_config5(config5); 300 + 301 + return config5 & MIPS_CONF_M; 302 + } 303 + 294 304 static void decode_configs(struct cpuinfo_mips *c) 295 305 { 296 306 int ok; ··· 322 310 ok = decode_config3(c); 323 311 if (ok) 324 312 ok = decode_config4(c); 313 + if (ok) 314 + ok = decode_config5(c); 325 315 326 316 mips_probe_watch_registers(c); 327 317 ··· 336 322 337 323 static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) 338 324 { 339 - switch (c->processor_id & 0xff00) { 325 + switch (c->processor_id & PRID_IMP_MASK) { 340 326 case PRID_IMP_R2000: 341 327 c->cputype = CPU_R2000; 342 328 __cpu_name[cpu] = "R2000"; ··· 347 333 c->tlbsize = 64; 348 334 break; 349 335 case PRID_IMP_R3000: 350 - if ((c->processor_id & 0xff) == PRID_REV_R3000A) { 336 + if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) { 351 337 if (cpu_has_confreg()) { 352 338 c->cputype = CPU_R3081E; 353 339 __cpu_name[cpu] = "R3081"; ··· 367 353 break; 368 354 case PRID_IMP_R4000: 369 355 if (read_c0_config() & CONF_SC) { 370 - if 
((c->processor_id & 0xff) >= PRID_REV_R4400) { 356 + if ((c->processor_id & PRID_REV_MASK) >= 357 + PRID_REV_R4400) { 371 358 c->cputype = CPU_R4400PC; 372 359 __cpu_name[cpu] = "R4400PC"; 373 360 } else { ··· 376 361 __cpu_name[cpu] = "R4000PC"; 377 362 } 378 363 } else { 379 - if ((c->processor_id & 0xff) >= PRID_REV_R4400) { 364 + if ((c->processor_id & PRID_REV_MASK) >= 365 + PRID_REV_R4400) { 380 366 c->cputype = CPU_R4400SC; 381 367 __cpu_name[cpu] = "R4400SC"; 382 368 } else { ··· 470 454 __cpu_name[cpu] = "TX3927"; 471 455 c->tlbsize = 64; 472 456 } else { 473 - switch (c->processor_id & 0xff) { 457 + switch (c->processor_id & PRID_REV_MASK) { 474 458 case PRID_REV_TX3912: 475 459 c->cputype = CPU_TX3912; 476 460 __cpu_name[cpu] = "TX3912"; ··· 656 640 static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) 657 641 { 658 642 decode_configs(c); 659 - switch (c->processor_id & 0xff00) { 643 + switch (c->processor_id & PRID_IMP_MASK) { 660 644 case PRID_IMP_4KC: 661 645 c->cputype = CPU_4KC; 662 646 __cpu_name[cpu] = "MIPS 4Kc"; ··· 727 711 static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) 728 712 { 729 713 decode_configs(c); 730 - switch (c->processor_id & 0xff00) { 714 + switch (c->processor_id & PRID_IMP_MASK) { 731 715 case PRID_IMP_AU1_REV1: 732 716 case PRID_IMP_AU1_REV2: 733 717 c->cputype = CPU_ALCHEMY; ··· 746 730 break; 747 731 case 4: 748 732 __cpu_name[cpu] = "Au1200"; 749 - if ((c->processor_id & 0xff) == 2) 733 + if ((c->processor_id & PRID_REV_MASK) == 2) 750 734 __cpu_name[cpu] = "Au1250"; 751 735 break; 752 736 case 5: ··· 764 748 { 765 749 decode_configs(c); 766 750 767 - switch (c->processor_id & 0xff00) { 751 + switch (c->processor_id & PRID_IMP_MASK) { 768 752 case PRID_IMP_SB1: 769 753 c->cputype = CPU_SB1; 770 754 __cpu_name[cpu] = "SiByte SB1"; 771 755 /* FPU in pass1 is known to have issues. 
*/ 772 - if ((c->processor_id & 0xff) < 0x02) 756 + if ((c->processor_id & PRID_REV_MASK) < 0x02) 773 757 c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); 774 758 break; 775 759 case PRID_IMP_SB1A: ··· 782 766 static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu) 783 767 { 784 768 decode_configs(c); 785 - switch (c->processor_id & 0xff00) { 769 + switch (c->processor_id & PRID_IMP_MASK) { 786 770 case PRID_IMP_SR71000: 787 771 c->cputype = CPU_SR71000; 788 772 __cpu_name[cpu] = "Sandcraft SR71000"; ··· 795 779 static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu) 796 780 { 797 781 decode_configs(c); 798 - switch (c->processor_id & 0xff00) { 782 + switch (c->processor_id & PRID_IMP_MASK) { 799 783 case PRID_IMP_PR4450: 800 784 c->cputype = CPU_PR4450; 801 785 __cpu_name[cpu] = "Philips PR4450"; ··· 807 791 static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) 808 792 { 809 793 decode_configs(c); 810 - switch (c->processor_id & 0xff00) { 794 + switch (c->processor_id & PRID_IMP_MASK) { 811 795 case PRID_IMP_BMIPS32_REV4: 812 796 case PRID_IMP_BMIPS32_REV8: 813 797 c->cputype = CPU_BMIPS32; ··· 822 806 set_elf_platform(cpu, "bmips3300"); 823 807 break; 824 808 case PRID_IMP_BMIPS43XX: { 825 - int rev = c->processor_id & 0xff; 809 + int rev = c->processor_id & PRID_REV_MASK; 826 810 827 811 if (rev >= PRID_REV_BMIPS4380_LO && 828 812 rev <= PRID_REV_BMIPS4380_HI) { ··· 848 832 static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) 849 833 { 850 834 decode_configs(c); 851 - switch (c->processor_id & 0xff00) { 835 + switch (c->processor_id & PRID_IMP_MASK) { 852 836 case PRID_IMP_CAVIUM_CN38XX: 853 837 case PRID_IMP_CAVIUM_CN31XX: 854 838 case PRID_IMP_CAVIUM_CN30XX: ··· 891 875 decode_configs(c); 892 876 /* JZRISC does not implement the CP0 counter. 
*/ 893 877 c->options &= ~MIPS_CPU_COUNTER; 894 - switch (c->processor_id & 0xff00) { 878 + switch (c->processor_id & PRID_IMP_MASK) { 895 879 case PRID_IMP_JZRISC: 896 880 c->cputype = CPU_JZRISC; 897 881 __cpu_name[cpu] = "Ingenic JZRISC"; ··· 906 890 { 907 891 decode_configs(c); 908 892 909 - if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) { 893 + if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) { 910 894 c->cputype = CPU_ALCHEMY; 911 895 __cpu_name[cpu] = "Au1300"; 912 896 /* following stuff is not for Alchemy */ ··· 921 905 MIPS_CPU_EJTAG | 922 906 MIPS_CPU_LLSC); 923 907 924 - switch (c->processor_id & 0xff00) { 908 + switch (c->processor_id & PRID_IMP_MASK) { 925 909 case PRID_IMP_NETLOGIC_XLP2XX: 926 910 c->cputype = CPU_XLP; 927 911 __cpu_name[cpu] = "Broadcom XLPII"; ··· 1000 984 c->cputype = CPU_UNKNOWN; 1001 985 1002 986 c->processor_id = read_c0_prid(); 1003 - switch (c->processor_id & 0xff0000) { 987 + switch (c->processor_id & PRID_COMP_MASK) { 1004 988 case PRID_COMP_LEGACY: 1005 989 cpu_probe_legacy(c, cpu); 1006 990 break;
+2 -1
arch/mips/kernel/idle.c
··· 18 18 #include <linux/sched.h> 19 19 #include <asm/cpu.h> 20 20 #include <asm/cpu-info.h> 21 + #include <asm/cpu-type.h> 21 22 #include <asm/idle.h> 22 23 #include <asm/mipsregs.h> 23 24 ··· 137 136 return; 138 137 } 139 138 140 - switch (c->cputype) { 139 + switch (current_cpu_type()) { 141 140 case CPU_R3081: 142 141 case CPU_R3081E: 143 142 cpu_wait = r3081_wait;
+1
arch/mips/kernel/time.c
··· 24 24 #include <linux/export.h> 25 25 26 26 #include <asm/cpu-features.h> 27 + #include <asm/cpu-type.h> 27 28 #include <asm/div64.h> 28 29 #include <asm/smtc_ipi.h> 29 30 #include <asm/time.h>
+2 -1
arch/mips/kernel/traps.c
··· 39 39 #include <asm/break.h> 40 40 #include <asm/cop2.h> 41 41 #include <asm/cpu.h> 42 + #include <asm/cpu-type.h> 42 43 #include <asm/dsp.h> 43 44 #include <asm/fpu.h> 44 45 #include <asm/fpu_emulator.h> ··· 623 622 regs->regs[rt] = read_c0_count(); 624 623 return 0; 625 624 case 3: /* Count register resolution */ 626 - switch (current_cpu_data.cputype) { 625 + switch (current_cpu_type()) { 627 626 case CPU_20KC: 628 627 case CPU_25KF: 629 628 regs->regs[rt] = 1;
+4 -2
arch/mips/mm/c-octeon.c
··· 19 19 #include <asm/bootinfo.h> 20 20 #include <asm/cacheops.h> 21 21 #include <asm/cpu-features.h> 22 + #include <asm/cpu-type.h> 22 23 #include <asm/page.h> 23 24 #include <asm/pgtable.h> 24 25 #include <asm/r4kcache.h> ··· 187 186 unsigned long dcache_size; 188 187 unsigned int config1; 189 188 struct cpuinfo_mips *c = &current_cpu_data; 189 + int cputype = current_cpu_type(); 190 190 191 191 config1 = read_c0_config1(); 192 - switch (c->cputype) { 192 + switch (cputype) { 193 193 case CPU_CAVIUM_OCTEON: 194 194 case CPU_CAVIUM_OCTEON_PLUS: 195 195 c->icache.linesz = 2 << ((config1 >> 19) & 7); ··· 201 199 c->icache.sets * c->icache.ways * c->icache.linesz; 202 200 c->icache.waybit = ffs(icache_size / c->icache.ways) - 1; 203 201 c->dcache.linesz = 128; 204 - if (c->cputype == CPU_CAVIUM_OCTEON_PLUS) 202 + if (cputype == CPU_CAVIUM_OCTEON_PLUS) 205 203 c->dcache.sets = 2; /* CN5XXX has two Dcache sets */ 206 204 else 207 205 c->dcache.sets = 1; /* CN3XXX has one Dcache set */
+32 -16
arch/mips/mm/c-r4k.c
··· 12 12 #include <linux/highmem.h> 13 13 #include <linux/kernel.h> 14 14 #include <linux/linkage.h> 15 + #include <linux/preempt.h> 15 16 #include <linux/sched.h> 16 17 #include <linux/smp.h> 17 18 #include <linux/mm.h> ··· 25 24 #include <asm/cacheops.h> 26 25 #include <asm/cpu.h> 27 26 #include <asm/cpu-features.h> 27 + #include <asm/cpu-type.h> 28 28 #include <asm/io.h> 29 29 #include <asm/page.h> 30 30 #include <asm/pgtable.h> ··· 603 601 /* Catch bad driver code */ 604 602 BUG_ON(size == 0); 605 603 604 + preempt_disable(); 606 605 if (cpu_has_inclusive_pcaches) { 607 606 if (size >= scache_size) 608 607 r4k_blast_scache(); ··· 624 621 R4600_HIT_CACHEOP_WAR_IMPL; 625 622 blast_dcache_range(addr, addr + size); 626 623 } 624 + preempt_enable(); 627 625 628 626 bc_wback_inv(addr, size); 629 627 __sync(); ··· 635 631 /* Catch bad driver code */ 636 632 BUG_ON(size == 0); 637 633 634 + preempt_disable(); 638 635 if (cpu_has_inclusive_pcaches) { 639 636 if (size >= scache_size) 640 637 r4k_blast_scache(); ··· 660 655 R4600_HIT_CACHEOP_WAR_IMPL; 661 656 blast_inv_dcache_range(addr, addr + size); 662 657 } 658 + preempt_enable(); 663 659 664 660 bc_inv(addr, size); 665 661 __sync(); ··· 786 780 787 781 static inline void alias_74k_erratum(struct cpuinfo_mips *c) 788 782 { 783 + unsigned int imp = c->processor_id & PRID_IMP_MASK; 784 + unsigned int rev = c->processor_id & PRID_REV_MASK; 785 + 789 786 /* 790 787 * Early versions of the 74K do not update the cache tags on a 791 788 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG 792 789 * aliases. In this case it is better to treat the cache as always 793 790 * having aliases. 
794 791 */ 795 - if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0)) 796 - c->dcache.flags |= MIPS_CACHE_VTAG; 797 - if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0)) 798 - write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); 799 - if (((c->processor_id & 0xff00) == PRID_IMP_1074K) && 800 - ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) { 801 - c->dcache.flags |= MIPS_CACHE_VTAG; 802 - write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); 792 + switch (imp) { 793 + case PRID_IMP_74K: 794 + if (rev <= PRID_REV_ENCODE_332(2, 4, 0)) 795 + c->dcache.flags |= MIPS_CACHE_VTAG; 796 + if (rev == PRID_REV_ENCODE_332(2, 4, 0)) 797 + write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); 798 + break; 799 + case PRID_IMP_1074K: 800 + if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) { 801 + c->dcache.flags |= MIPS_CACHE_VTAG; 802 + write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); 803 + } 804 + break; 805 + default: 806 + BUG(); 803 807 } 804 808 } 805 809 ··· 825 809 unsigned long config1; 826 810 unsigned int lsize; 827 811 828 - switch (c->cputype) { 812 + switch (current_cpu_type()) { 829 813 case CPU_R4600: /* QED style two way caches? */ 830 814 case CPU_R4700: 831 815 case CPU_R5000: ··· 1041 1025 * presumably no vendor is shipping his hardware in the "bad" 1042 1026 * configuration. 1043 1027 */ 1044 - if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 && 1028 + if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 && 1029 + (prid & PRID_REV_MASK) < PRID_REV_R4400 && 1045 1030 !(config & CONF_SC) && c->icache.linesz != 16 && 1046 1031 PAGE_SIZE <= 0x8000) 1047 1032 panic("Improper R4000SC processor configuration detected"); ··· 1062 1045 * normally they'd suffer from aliases but magic in the hardware deals 1063 1046 * with that for us so we don't need to take care ourselves. 
1064 1047 */ 1065 - switch (c->cputype) { 1048 + switch (current_cpu_type()) { 1066 1049 case CPU_20KC: 1067 1050 case CPU_25KF: 1068 1051 case CPU_SB1: ··· 1082 1065 case CPU_34K: 1083 1066 case CPU_74K: 1084 1067 case CPU_1004K: 1085 - if (c->cputype == CPU_74K) 1068 + if (current_cpu_type() == CPU_74K) 1086 1069 alias_74k_erratum(c); 1087 1070 if ((read_c0_config7() & (1 << 16))) { 1088 1071 /* effectively physically indexed dcache, ··· 1095 1078 c->dcache.flags |= MIPS_CACHE_ALIASES; 1096 1079 } 1097 1080 1098 - switch (c->cputype) { 1081 + switch (current_cpu_type()) { 1099 1082 case CPU_20KC: 1100 1083 /* 1101 1084 * Some older 20Kc chips doesn't have the 'VI' bit in ··· 1224 1207 * processors don't have a S-cache that would be relevant to the 1225 1208 * Linux memory management. 1226 1209 */ 1227 - switch (c->cputype) { 1210 + switch (current_cpu_type()) { 1228 1211 case CPU_R4000SC: 1229 1212 case CPU_R4000MC: 1230 1213 case CPU_R4400SC: ··· 1401 1384 { 1402 1385 extern char __weak except_vec2_generic; 1403 1386 extern char __weak except_vec2_sb1; 1404 - struct cpuinfo_mips *c = &current_cpu_data; 1405 1387 1406 - switch (c->cputype) { 1388 + switch (current_cpu_type()) { 1407 1389 case CPU_SB1: 1408 1390 case CPU_SB1A: 1409 1391 set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
+1
arch/mips/mm/dma-default.c
··· 18 18 #include <linux/highmem.h> 19 19 20 20 #include <asm/cache.h> 21 + #include <asm/cpu-type.h> 21 22 #include <asm/io.h> 22 23 23 24 #include <dma-coherence.h>
+1
arch/mips/mm/page.c
··· 18 18 19 19 #include <asm/bugs.h> 20 20 #include <asm/cacheops.h> 21 + #include <asm/cpu-type.h> 21 22 #include <asm/inst.h> 22 23 #include <asm/io.h> 23 24 #include <asm/page.h>
+2 -1
arch/mips/mm/sc-mips.c
··· 6 6 #include <linux/sched.h> 7 7 #include <linux/mm.h> 8 8 9 + #include <asm/cpu-type.h> 9 10 #include <asm/mipsregs.h> 10 11 #include <asm/bcache.h> 11 12 #include <asm/cacheops.h> ··· 72 71 unsigned int tmp; 73 72 74 73 /* Check the bypass bit (L2B) */ 75 - switch (c->cputype) { 74 + switch (current_cpu_type()) { 76 75 case CPU_34K: 77 76 case CPU_74K: 78 77 case CPU_1004K:
+1
arch/mips/mm/tlb-r4k.c
··· 16 16 #include <linux/module.h> 17 17 18 18 #include <asm/cpu.h> 19 + #include <asm/cpu-type.h> 19 20 #include <asm/bootinfo.h> 20 21 #include <asm/mmu_context.h> 21 22 #include <asm/pgtable.h>
+1
arch/mips/mm/tlbex.c
··· 30 30 #include <linux/cache.h> 31 31 32 32 #include <asm/cacheflush.h> 33 + #include <asm/cpu-type.h> 33 34 #include <asm/pgtable.h> 34 35 #include <asm/war.h> 35 36 #include <asm/uasm.h>
+3 -2
arch/mips/mti-malta/malta-time.c
··· 27 27 #include <linux/timex.h> 28 28 #include <linux/mc146818rtc.h> 29 29 30 + #include <asm/cpu.h> 30 31 #include <asm/mipsregs.h> 31 32 #include <asm/mipsmtregs.h> 32 33 #include <asm/hardirq.h> ··· 77 76 #endif 78 77 79 78 #if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ) 80 - unsigned int prid = read_c0_prid() & 0xffff00; 79 + unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK); 81 80 82 81 /* 83 82 * XXXKYMA: hardwire the CPU frequency to Host Freq/4 ··· 170 169 171 170 void __init plat_time_init(void) 172 171 { 173 - unsigned int prid = read_c0_prid() & 0xffff00; 172 + unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK); 174 173 unsigned int freq; 175 174 176 175 estimate_frequencies();
+2 -1
arch/mips/mti-sead3/sead3-time.c
··· 7 7 */ 8 8 #include <linux/init.h> 9 9 10 + #include <asm/cpu.h> 10 11 #include <asm/setup.h> 11 12 #include <asm/time.h> 12 13 #include <asm/irq.h> ··· 35 34 */ 36 35 static unsigned int __init estimate_cpu_frequency(void) 37 36 { 38 - unsigned int prid = read_c0_prid() & 0xffff00; 37 + unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK); 39 38 unsigned int tick = 0; 40 39 unsigned int freq; 41 40 unsigned int orig;
+2 -1
arch/mips/netlogic/xlr/fmn-config.c
··· 36 36 #include <linux/irq.h> 37 37 #include <linux/interrupt.h> 38 38 39 + #include <asm/cpu.h> 39 40 #include <asm/mipsregs.h> 40 41 #include <asm/netlogic/xlr/fmn.h> 41 42 #include <asm/netlogic/xlr/xlr.h> ··· 188 187 int processor_id, num_core; 189 188 190 189 num_core = hweight32(nlm_current_node()->coremask); 191 - processor_id = read_c0_prid() & 0xff00; 190 + processor_id = read_c0_prid() & PRID_IMP_MASK; 192 191 193 192 setup_cpu_fmninfo(cpu, num_core); 194 193 switch (processor_id) {
+1
arch/mips/oprofile/common.c
··· 12 12 #include <linux/oprofile.h> 13 13 #include <linux/smp.h> 14 14 #include <asm/cpu-info.h> 15 + #include <asm/cpu-type.h> 15 16 16 17 #include "op_impl.h" 17 18
+1
arch/mips/pci/pci-bcm1480.c
··· 39 39 #include <linux/mm.h> 40 40 #include <linux/console.h> 41 41 #include <linux/tty.h> 42 + #include <linux/vt.h> 42 43 43 44 #include <asm/sibyte/bcm1480_regs.h> 44 45 #include <asm/sibyte/bcm1480_scd.h>
+2 -1
arch/mips/sibyte/bcm1480/setup.c
··· 22 22 #include <linux/string.h> 23 23 24 24 #include <asm/bootinfo.h> 25 + #include <asm/cpu.h> 25 26 #include <asm/mipsregs.h> 26 27 #include <asm/io.h> 27 28 #include <asm/sibyte/sb1250.h> ··· 120 119 uint64_t sys_rev; 121 120 int plldiv; 122 121 123 - sb1_pass = read_c0_prid() & 0xff; 122 + sb1_pass = read_c0_prid() & PRID_REV_MASK; 124 123 sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION)); 125 124 soc_type = SYS_SOC_TYPE(sys_rev); 126 125 part_type = G_SYS_PART(sys_rev);
+2 -1
arch/mips/sibyte/sb1250/setup.c
··· 22 22 #include <linux/string.h> 23 23 24 24 #include <asm/bootinfo.h> 25 + #include <asm/cpu.h> 25 26 #include <asm/mipsregs.h> 26 27 #include <asm/io.h> 27 28 #include <asm/sibyte/sb1250.h> ··· 183 182 int plldiv; 184 183 int bad_config = 0; 185 184 186 - sb1_pass = read_c0_prid() & 0xff; 185 + sb1_pass = read_c0_prid() & PRID_REV_MASK; 187 186 sys_rev = __raw_readq(IOADDR(A_SCD_SYSTEM_REVISION)); 188 187 soc_type = SYS_SOC_TYPE(sys_rev); 189 188 soc_pass = G_SYS_REVISION(sys_rev);
+2 -1
arch/mips/sni/setup.c
··· 25 25 #endif 26 26 27 27 #include <asm/bootinfo.h> 28 + #include <asm/cpu.h> 28 29 #include <asm/io.h> 29 30 #include <asm/reboot.h> 30 31 #include <asm/sni.h> ··· 174 173 system_type = "RM300-Cxx"; 175 174 break; 176 175 case SNI_BRD_PCI_DESKTOP: 177 - switch (read_c0_prid() & 0xff00) { 176 + switch (read_c0_prid() & PRID_IMP_MASK) { 178 177 case PRID_IMP_R4600: 179 178 case PRID_IMP_R4700: 180 179 system_type = "RM200-C20";
+1 -1
arch/tile/Kconfig
··· 361 361 362 362 config VMALLOC_RESERVE 363 363 hex 364 - default 0x1000000 364 + default 0x2000000 365 365 366 366 config HARDWALL 367 367 bool "Hardwall support to allow access to user dynamic network"
+44 -46
arch/tile/gxio/iorpc_mpipe.c
··· 21 21 unsigned int flags; 22 22 }; 23 23 24 - int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, 24 + int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, 25 25 unsigned int count, unsigned int first, 26 26 unsigned int flags) 27 27 { ··· 45 45 unsigned int buffer_size_enum; 46 46 }; 47 47 48 - int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, 48 + int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context, 49 49 void *mem_va, size_t mem_size, 50 50 unsigned int mem_flags, unsigned int stack, 51 51 unsigned int buffer_size_enum) ··· 80 80 unsigned int flags; 81 81 }; 82 82 83 - int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, 83 + int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, 84 84 unsigned int count, unsigned int first, 85 85 unsigned int flags) 86 86 { ··· 102 102 unsigned int ring; 103 103 }; 104 104 105 - int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 105 + int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va, 106 106 size_t mem_size, unsigned int mem_flags, 107 107 unsigned int ring) 108 108 { ··· 133 133 unsigned int ring; 134 134 }; 135 135 136 - int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, 136 + int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context, 137 137 int inter_x, int inter_y, 138 138 int inter_ipi, int inter_event, 139 139 unsigned int ring) ··· 158 158 unsigned int ring; 159 159 }; 160 160 161 - int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, 161 + int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context, 162 162 unsigned int ring) 163 163 { 164 164 struct enable_notif_ring_interrupt_param temp; ··· 179 179 unsigned int flags; 180 180 }; 181 181 182 - int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, 182 + int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context, 183 183 unsigned int 
count, unsigned int first, 184 184 unsigned int flags) 185 185 { ··· 201 201 gxio_mpipe_notif_group_bits_t bits; 202 202 }; 203 203 204 - int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, 204 + int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context, 205 205 unsigned int group, 206 206 gxio_mpipe_notif_group_bits_t bits) 207 207 { ··· 223 223 unsigned int flags; 224 224 }; 225 225 226 - int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, 226 + int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count, 227 227 unsigned int first, unsigned int flags) 228 228 { 229 229 struct alloc_buckets_param temp; ··· 244 244 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info; 245 245 }; 246 246 247 - int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, 247 + int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, 248 248 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info) 249 249 { 250 250 struct init_bucket_param temp; ··· 265 265 unsigned int flags; 266 266 }; 267 267 268 - int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, 268 + int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, 269 269 unsigned int count, unsigned int first, 270 270 unsigned int flags) 271 271 { ··· 288 288 unsigned int channel; 289 289 }; 290 290 291 - int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 291 + int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va, 292 292 size_t mem_size, unsigned int mem_flags, 293 293 unsigned int ring, unsigned int channel) 294 294 { ··· 315 315 EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux); 316 316 317 317 318 - int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, 318 + int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob, 319 319 size_t blob_size) 320 320 { 321 321 const void *params = blob; ··· 332 332 unsigned int flags; 333 333 }; 334 334 335 - 
int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, 335 + int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context, 336 336 unsigned int iotlb, HV_PTE pte, 337 337 unsigned int flags) 338 338 { ··· 355 355 unsigned int flags; 356 356 }; 357 357 358 - int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, 358 + int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context, 359 359 _gxio_mpipe_link_name_t name, unsigned int flags) 360 360 { 361 361 struct link_open_aux_param temp; ··· 374 374 int mac; 375 375 }; 376 376 377 - int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac) 377 + int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac) 378 378 { 379 379 struct link_close_aux_param temp; 380 380 struct link_close_aux_param *params = &temp; ··· 393 393 int64_t val; 394 394 }; 395 395 396 - int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, 396 + int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac, 397 397 uint32_t attr, int64_t val) 398 398 { 399 399 struct link_set_attr_aux_param temp; ··· 415 415 uint64_t cycles; 416 416 }; 417 417 418 - int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, 419 - uint64_t * nsec, uint64_t * cycles) 418 + int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec, 419 + uint64_t *nsec, uint64_t *cycles) 420 420 { 421 421 int __result; 422 422 struct get_timestamp_aux_param temp; ··· 440 440 uint64_t cycles; 441 441 }; 442 442 443 - int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, 443 + int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec, 444 444 uint64_t nsec, uint64_t cycles) 445 445 { 446 446 struct set_timestamp_aux_param temp; ··· 460 460 int64_t nsec; 461 461 }; 462 462 463 - int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, 464 - int64_t nsec) 463 + int 
gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec) 465 464 { 466 465 struct adjust_timestamp_aux_param temp; 467 466 struct adjust_timestamp_aux_param *params = &temp; ··· 474 475 475 476 EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux); 476 477 477 - struct adjust_timestamp_freq_param { 478 - int32_t ppb; 479 - }; 480 - 481 - int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context, 482 - int32_t ppb) 483 - { 484 - struct adjust_timestamp_freq_param temp; 485 - struct adjust_timestamp_freq_param *params = &temp; 486 - 487 - params->ppb = ppb; 488 - 489 - return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 490 - sizeof(*params), 491 - GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ); 492 - } 493 - 494 - EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq); 495 - 496 478 struct config_edma_ring_blks_param { 497 479 unsigned int ering; 498 480 unsigned int max_blks; ··· 481 501 unsigned int db; 482 502 }; 483 503 484 - int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context, 504 + int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context, 485 505 unsigned int ering, unsigned int max_blks, 486 506 unsigned int min_snf_blks, unsigned int db) 487 507 { ··· 500 520 501 521 EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks); 502 522 523 + struct adjust_timestamp_freq_param { 524 + int32_t ppb; 525 + }; 526 + 527 + int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb) 528 + { 529 + struct adjust_timestamp_freq_param temp; 530 + struct adjust_timestamp_freq_param *params = &temp; 531 + 532 + params->ppb = ppb; 533 + 534 + return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, 535 + sizeof(*params), 536 + GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ); 537 + } 538 + 539 + EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq); 540 + 503 541 struct arm_pollfd_param { 504 542 union iorpc_pollfd pollfd; 505 543 }; 506 544 507 - int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) 545 + int 
gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie) 508 546 { 509 547 struct arm_pollfd_param temp; 510 548 struct arm_pollfd_param *params = &temp; ··· 539 541 union iorpc_pollfd pollfd; 540 542 }; 541 543 542 - int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie) 544 + int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie) 543 545 { 544 546 struct close_pollfd_param temp; 545 547 struct close_pollfd_param *params = &temp; ··· 556 558 HV_PTE base; 557 559 }; 558 560 559 - int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base) 561 + int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base) 560 562 { 561 563 int __result; 562 564 struct get_mmio_base_param temp; ··· 577 579 unsigned long size; 578 580 }; 579 581 580 - int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, 582 + int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context, 581 583 unsigned long offset, unsigned long size) 582 584 { 583 585 struct check_mmio_offset_param temp;
+7 -8
arch/tile/gxio/iorpc_mpipe_info.c
··· 15 15 /* This file is machine-generated; DO NOT EDIT! */ 16 16 #include "gxio/iorpc_mpipe_info.h" 17 17 18 - 19 18 struct instance_aux_param { 20 19 _gxio_mpipe_link_name_t name; 21 20 }; 22 21 23 - int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, 22 + int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, 24 23 _gxio_mpipe_link_name_t name) 25 24 { 26 25 struct instance_aux_param temp; ··· 38 39 _gxio_mpipe_link_mac_t mac; 39 40 }; 40 41 41 - int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, 42 + int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, 42 43 unsigned int idx, 43 - _gxio_mpipe_link_name_t * name, 44 - _gxio_mpipe_link_mac_t * mac) 44 + _gxio_mpipe_link_name_t *name, 45 + _gxio_mpipe_link_mac_t *mac) 45 46 { 46 47 int __result; 47 48 struct enumerate_aux_param temp; ··· 49 50 50 51 __result = 51 52 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), 52 - (((uint64_t) idx << 32) | 53 + (((uint64_t)idx << 32) | 53 54 GXIO_MPIPE_INFO_OP_ENUMERATE_AUX)); 54 55 *name = params->name; 55 56 *mac = params->mac; ··· 63 64 HV_PTE base; 64 65 }; 65 66 66 - int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, 67 + int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, 67 68 HV_PTE *base) 68 69 { 69 70 int __result; ··· 85 86 unsigned long size; 86 87 }; 87 88 88 - int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, 89 + int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context, 89 90 unsigned long offset, unsigned long size) 90 91 { 91 92 struct check_mmio_offset_param temp;
+14 -14
arch/tile/gxio/iorpc_trio.c
··· 21 21 unsigned int flags; 22 22 }; 23 23 24 - int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, 24 + int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, 25 25 unsigned int first, unsigned int flags) 26 26 { 27 27 struct alloc_asids_param temp; ··· 44 44 unsigned int flags; 45 45 }; 46 46 47 - int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, 47 + int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, 48 48 unsigned int count, unsigned int first, 49 49 unsigned int flags) 50 50 { ··· 67 67 unsigned int flags; 68 68 }; 69 69 70 - int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context, 70 + int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, 71 71 unsigned int count, unsigned int first, 72 72 unsigned int flags) 73 73 { ··· 91 91 unsigned int flags; 92 92 }; 93 93 94 - int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, 94 + int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, 95 95 unsigned int count, unsigned int first, 96 96 unsigned int flags) 97 97 { ··· 115 115 unsigned int flags; 116 116 }; 117 117 118 - int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, 118 + int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, 119 119 unsigned int pio_region, unsigned int mac, 120 120 uint32_t bus_address_hi, unsigned int flags) 121 121 { ··· 145 145 unsigned int order_mode; 146 146 }; 147 147 148 - int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, 148 + int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context, 149 149 unsigned int map, unsigned long va, 150 150 uint64_t size, unsigned int asid, 151 151 unsigned int mac, uint64_t bus_address, ··· 175 175 struct pcie_trio_ports_property trio_ports; 176 176 }; 177 177 178 - int gxio_trio_get_port_property(gxio_trio_context_t * context, 178 + int gxio_trio_get_port_property(gxio_trio_context_t *context, 179 179 struct pcie_trio_ports_property *trio_ports) 180 
180 { 181 181 int __result; ··· 198 198 unsigned int intx; 199 199 }; 200 200 201 - int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, 201 + int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x, 202 202 int inter_y, int inter_ipi, int inter_event, 203 203 unsigned int mac, unsigned int intx) 204 204 { ··· 227 227 unsigned int asid; 228 228 }; 229 229 230 - int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, 230 + int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x, 231 231 int inter_y, int inter_ipi, int inter_event, 232 232 unsigned int mac, unsigned int mem_map, 233 233 uint64_t mem_map_base, uint64_t mem_map_limit, ··· 259 259 unsigned int mac; 260 260 }; 261 261 262 - int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, 262 + int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps, 263 263 uint16_t mrs, unsigned int mac) 264 264 { 265 265 struct set_mps_mrs_param temp; ··· 279 279 unsigned int mac; 280 280 }; 281 281 282 - int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac) 282 + int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac) 283 283 { 284 284 struct force_rc_link_up_param temp; 285 285 struct force_rc_link_up_param *params = &temp; ··· 296 296 unsigned int mac; 297 297 }; 298 298 299 - int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac) 299 + int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac) 300 300 { 301 301 struct force_ep_link_up_param temp; 302 302 struct force_ep_link_up_param *params = &temp; ··· 313 313 HV_PTE base; 314 314 }; 315 315 316 - int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base) 316 + int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base) 317 317 { 318 318 int __result; 319 319 struct get_mmio_base_param temp; ··· 334 334 unsigned long size; 335 335 }; 336 336 337 - int 
gxio_trio_check_mmio_offset(gxio_trio_context_t * context, 337 + int gxio_trio_check_mmio_offset(gxio_trio_context_t *context, 338 338 unsigned long offset, unsigned long size) 339 339 { 340 340 struct check_mmio_offset_param temp;
+4 -4
arch/tile/gxio/iorpc_usb_host.c
··· 19 19 union iorpc_interrupt interrupt; 20 20 }; 21 21 22 - int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, 22 + int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, 23 23 int inter_y, int inter_ipi, int inter_event) 24 24 { 25 25 struct cfg_interrupt_param temp; ··· 41 41 unsigned int flags; 42 42 }; 43 43 44 - int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, 44 + int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, 45 45 HV_PTE pte, unsigned int flags) 46 46 { 47 47 struct register_client_memory_param temp; ··· 61 61 HV_PTE base; 62 62 }; 63 63 64 - int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, HV_PTE *base) 64 + int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base) 65 65 { 66 66 int __result; 67 67 struct get_mmio_base_param temp; ··· 82 82 unsigned long size; 83 83 }; 84 84 85 - int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, 85 + int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context, 86 86 unsigned long offset, unsigned long size) 87 87 { 88 88 struct check_mmio_offset_param temp;
+4 -4
arch/tile/gxio/usb_host.c
··· 26 26 #include <gxio/kiorpc.h> 27 27 #include <gxio/usb_host.h> 28 28 29 - int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, 29 + int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, 30 30 int is_ehci) 31 31 { 32 32 char file[32]; ··· 63 63 64 64 EXPORT_SYMBOL_GPL(gxio_usb_host_init); 65 65 66 - int gxio_usb_host_destroy(gxio_usb_host_context_t * context) 66 + int gxio_usb_host_destroy(gxio_usb_host_context_t *context) 67 67 { 68 68 iounmap((void __force __iomem *)(context->mmio_base)); 69 69 hv_dev_close(context->fd); ··· 76 76 77 77 EXPORT_SYMBOL_GPL(gxio_usb_host_destroy); 78 78 79 - void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context) 79 + void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context) 80 80 { 81 81 return context->mmio_base; 82 82 } 83 83 84 84 EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start); 85 85 86 - size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context) 86 + size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context) 87 87 { 88 88 return HV_USB_HOST_MMIO_SIZE; 89 89 }
+18 -6
arch/tile/include/arch/mpipe.h
··· 176 176 */ 177 177 uint_reg_t stack_idx : 5; 178 178 /* Reserved. */ 179 - uint_reg_t __reserved_2 : 5; 179 + uint_reg_t __reserved_2 : 3; 180 + /* 181 + * Instance ID. For devices that support automatic buffer return between 182 + * mPIPE instances, this field indicates the buffer owner. If the INST 183 + * field does not match the mPIPE's instance number when a packet is 184 + * egressed, buffers with HWB set will be returned to the other mPIPE 185 + * instance. Note that not all devices support multi-mPIPE buffer 186 + * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates 187 + * whether the INST field in the buffer descriptor is populated by iDMA 188 + * hardware. This field is ignored on writes. 189 + */ 190 + uint_reg_t inst : 2; 180 191 /* 181 192 * Reads as one to indicate that this is a hardware managed buffer. 182 193 * Ignored on writes since all buffers on a given stack are the same size. ··· 216 205 uint_reg_t c : 2; 217 206 uint_reg_t size : 3; 218 207 uint_reg_t hwb : 1; 219 - uint_reg_t __reserved_2 : 5; 208 + uint_reg_t inst : 2; 209 + uint_reg_t __reserved_2 : 3; 220 210 uint_reg_t stack_idx : 5; 221 211 uint_reg_t __reserved_1 : 6; 222 212 int_reg_t va : 35; ··· 243 231 /* Reserved. */ 244 232 uint_reg_t __reserved_0 : 3; 245 233 /* eDMA ring being accessed */ 246 - uint_reg_t ring : 5; 234 + uint_reg_t ring : 6; 247 235 /* Reserved. */ 248 - uint_reg_t __reserved_1 : 18; 236 + uint_reg_t __reserved_1 : 17; 249 237 /* 250 238 * This field of the address selects the region (address space) to be 251 239 * accessed. For the egress DMA post region, this field must be 5. ··· 262 250 uint_reg_t svc_dom : 5; 263 251 uint_reg_t __reserved_2 : 6; 264 252 uint_reg_t region : 3; 265 - uint_reg_t __reserved_1 : 18; 266 - uint_reg_t ring : 5; 253 + uint_reg_t __reserved_1 : 17; 254 + uint_reg_t ring : 6; 267 255 uint_reg_t __reserved_0 : 3; 268 256 #endif 269 257 };
+3 -3
arch/tile/include/arch/mpipe_constants.h
··· 16 16 #ifndef __ARCH_MPIPE_CONSTANTS_H__ 17 17 #define __ARCH_MPIPE_CONSTANTS_H__ 18 18 19 - #define MPIPE_NUM_CLASSIFIERS 10 19 + #define MPIPE_NUM_CLASSIFIERS 16 20 20 #define MPIPE_CLS_MHZ 1200 21 21 22 - #define MPIPE_NUM_EDMA_RINGS 32 22 + #define MPIPE_NUM_EDMA_RINGS 64 23 23 24 24 #define MPIPE_NUM_SGMII_MACS 16 25 - #define MPIPE_NUM_XAUI_MACS 4 25 + #define MPIPE_NUM_XAUI_MACS 16 26 26 #define MPIPE_NUM_LOOPBACK_CHANNELS 4 27 27 #define MPIPE_NUM_NON_LB_CHANNELS 28 28 28
+33 -21
arch/tile/include/arch/mpipe_shm.h
··· 44 44 * descriptors toggles each time the ring tail pointer wraps. 45 45 */ 46 46 uint_reg_t gen : 1; 47 + /** 48 + * For devices with EDMA reorder support, this field allows the 49 + * descriptor to select the egress FIFO. The associated DMA ring must 50 + * have ALLOW_EFIFO_SEL enabled. 51 + */ 52 + uint_reg_t efifo_sel : 6; 47 53 /** Reserved. Must be zero. */ 48 - uint_reg_t r0 : 7; 54 + uint_reg_t r0 : 1; 49 55 /** Checksum generation enabled for this transfer. */ 50 56 uint_reg_t csum : 1; 51 57 /** ··· 116 110 uint_reg_t notif : 1; 117 111 uint_reg_t ns : 1; 118 112 uint_reg_t csum : 1; 119 - uint_reg_t r0 : 7; 113 + uint_reg_t r0 : 1; 114 + uint_reg_t efifo_sel : 6; 120 115 uint_reg_t gen : 1; 121 116 #endif 122 117 ··· 133 126 /** Reserved. */ 134 127 uint_reg_t __reserved_1 : 3; 135 128 /** 136 - * Instance ID. For devices that support more than one mPIPE instance, 137 - * this field indicates the buffer owner. If the INST field does not 138 - * match the mPIPE's instance number when a packet is egressed, buffers 139 - * with HWB set will be returned to the other mPIPE instance. 129 + * Instance ID. For devices that support automatic buffer return between 130 + * mPIPE instances, this field indicates the buffer owner. If the INST 131 + * field does not match the mPIPE's instance number when a packet is 132 + * egressed, buffers with HWB set will be returned to the other mPIPE 133 + * instance. Note that not all devices support multi-mPIPE buffer 134 + * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates 135 + * whether the INST field in the buffer descriptor is populated by iDMA 136 + * hardware. 140 137 */ 141 - uint_reg_t inst : 1; 142 - /** Reserved. */ 143 - uint_reg_t __reserved_2 : 1; 138 + uint_reg_t inst : 2; 144 139 /** 145 140 * Always set to one by hardware in iDMA packet descriptors. 
For eDMA, 146 141 * indicates whether the buffer will be released to the buffer stack ··· 175 166 uint_reg_t c : 2; 176 167 uint_reg_t size : 3; 177 168 uint_reg_t hwb : 1; 178 - uint_reg_t __reserved_2 : 1; 179 - uint_reg_t inst : 1; 169 + uint_reg_t inst : 2; 180 170 uint_reg_t __reserved_1 : 3; 181 171 uint_reg_t stack_idx : 5; 182 172 uint_reg_t __reserved_0 : 6; ··· 416 408 /** 417 409 * Sequence number applied when packet is distributed. Classifier 418 410 * selects which sequence number is to be applied by writing the 13-bit 419 - * SQN-selector into this field. 411 + * SQN-selector into this field. For devices that support EXT_SQN (as 412 + * indicated in IDMA_INFO.EXT_SQN_SUPPORT), the GP_SQN can be extended to 413 + * 32-bits via the IDMA_CTL.EXT_SQN register. In this case the 414 + * PACKET_SQN will be reduced to 32 bits. 420 415 */ 421 416 uint_reg_t gp_sqn : 16; 422 417 /** ··· 462 451 /** Reserved. */ 463 452 uint_reg_t __reserved_5 : 3; 464 453 /** 465 - * Instance ID. For devices that support more than one mPIPE instance, 466 - * this field indicates the buffer owner. If the INST field does not 467 - * match the mPIPE's instance number when a packet is egressed, buffers 468 - * with HWB set will be returned to the other mPIPE instance. 454 + * Instance ID. For devices that support automatic buffer return between 455 + * mPIPE instances, this field indicates the buffer owner. If the INST 456 + * field does not match the mPIPE's instance number when a packet is 457 + * egressed, buffers with HWB set will be returned to the other mPIPE 458 + * instance. Note that not all devices support multi-mPIPE buffer 459 + * return. The MPIPE_EDMA_INFO.REMOTE_BUFF_RTN_SUPPORT bit indicates 460 + * whether the INST field in the buffer descriptor is populated by iDMA 461 + * hardware. 469 462 */ 470 - uint_reg_t inst : 1; 471 - /** Reserved. 
*/ 472 - uint_reg_t __reserved_6 : 1; 463 + uint_reg_t inst : 2; 473 464 /** 474 465 * Always set to one by hardware in iDMA packet descriptors. For eDMA, 475 466 * indicates whether the buffer will be released to the buffer stack ··· 504 491 uint_reg_t c : 2; 505 492 uint_reg_t size : 3; 506 493 uint_reg_t hwb : 1; 507 - uint_reg_t __reserved_6 : 1; 508 - uint_reg_t inst : 1; 494 + uint_reg_t inst : 2; 509 495 uint_reg_t __reserved_5 : 3; 510 496 uint_reg_t stack_idx : 5; 511 497 uint_reg_t __reserved_4 : 6;
+5 -5
arch/tile/include/arch/trio_constants.h
··· 16 16 #ifndef __ARCH_TRIO_CONSTANTS_H__ 17 17 #define __ARCH_TRIO_CONSTANTS_H__ 18 18 19 - #define TRIO_NUM_ASIDS 16 19 + #define TRIO_NUM_ASIDS 32 20 20 #define TRIO_NUM_TLBS_PER_ASID 16 21 21 22 22 #define TRIO_NUM_TPIO_REGIONS 8 23 23 #define TRIO_LOG2_NUM_TPIO_REGIONS 3 24 24 25 - #define TRIO_NUM_MAP_MEM_REGIONS 16 26 - #define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4 25 + #define TRIO_NUM_MAP_MEM_REGIONS 32 26 + #define TRIO_LOG2_NUM_MAP_MEM_REGIONS 5 27 27 #define TRIO_NUM_MAP_SQ_REGIONS 8 28 28 #define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3 29 29 30 30 #define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6 31 31 32 - #define TRIO_NUM_PUSH_DMA_RINGS 32 32 + #define TRIO_NUM_PUSH_DMA_RINGS 64 33 33 34 - #define TRIO_NUM_PULL_DMA_RINGS 32 34 + #define TRIO_NUM_PULL_DMA_RINGS 64 35 35 36 36 #endif /* __ARCH_TRIO_CONSTANTS_H__ */
+2 -3
arch/tile/include/asm/page.h
··· 182 182 183 183 #define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1))) 184 184 #define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */ 185 - #define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */ 186 - #define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */ 185 + #define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */ 186 + #define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */ 187 187 #define _VMALLOC_START FIXADDR_TOP 188 - #define HUGE_VMAP_BASE (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */ 189 188 #define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */ 190 189 #define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */ 191 190 #define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
+2 -10
arch/tile/include/asm/pgtable_32.h
··· 55 55 #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK) 56 56 57 57 #ifdef CONFIG_HIGHMEM 58 - # define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1)) 58 + # define _VMALLOC_END (PKMAP_BASE & ~(HPAGE_SIZE-1)) 59 59 #else 60 - # define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1)) 61 - #endif 62 - 63 - #ifdef CONFIG_HUGEVMAP 64 - #define HUGE_VMAP_END __VMAPPING_END 65 - #define HUGE_VMAP_BASE (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE) 66 - #define _VMALLOC_END HUGE_VMAP_BASE 67 - #else 68 - #define _VMALLOC_END __VMAPPING_END 60 + # define _VMALLOC_END (FIXADDR_START & ~(HPAGE_SIZE-1)) 69 61 #endif 70 62 71 63 /*
+1 -3
arch/tile/include/asm/pgtable_64.h
··· 52 52 * memory allocation code). The vmalloc code puts in an internal 53 53 * guard page between each allocation. 54 54 */ 55 - #define _VMALLOC_END HUGE_VMAP_BASE 55 + #define _VMALLOC_END MEM_SV_START 56 56 #define VMALLOC_END _VMALLOC_END 57 57 #define VMALLOC_START _VMALLOC_START 58 - 59 - #define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE) 60 58 61 59 #ifndef __ASSEMBLY__ 62 60
+26 -26
arch/tile/include/gxio/iorpc_mpipe.h
··· 56 56 #define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 57 57 #define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 58 58 59 - int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context, 59 + int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, 60 60 unsigned int count, unsigned int first, 61 61 unsigned int flags); 62 62 63 - int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context, 63 + int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context, 64 64 void *mem_va, size_t mem_size, 65 65 unsigned int mem_flags, unsigned int stack, 66 66 unsigned int buffer_size_enum); 67 67 68 68 69 - int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context, 69 + int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, 70 70 unsigned int count, unsigned int first, 71 71 unsigned int flags); 72 72 73 - int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 73 + int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va, 74 74 size_t mem_size, unsigned int mem_flags, 75 75 unsigned int ring); 76 76 77 - int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context, 77 + int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context, 78 78 int inter_x, int inter_y, 79 79 int inter_ipi, int inter_event, 80 80 unsigned int ring); 81 81 82 - int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context, 82 + int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context, 83 83 unsigned int ring); 84 84 85 - int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context, 85 + int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context, 86 86 unsigned int count, unsigned int first, 87 87 unsigned int flags); 88 88 89 - int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context, 89 + int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context, 90 90 unsigned int 
group, 91 91 gxio_mpipe_notif_group_bits_t bits); 92 92 93 - int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count, 93 + int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count, 94 94 unsigned int first, unsigned int flags); 95 95 96 - int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket, 96 + int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, 97 97 MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info); 98 98 99 - int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context, 99 + int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, 100 100 unsigned int count, unsigned int first, 101 101 unsigned int flags); 102 102 103 - int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va, 103 + int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va, 104 104 size_t mem_size, unsigned int mem_flags, 105 105 unsigned int ring, unsigned int channel); 106 106 107 107 108 - int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob, 108 + int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob, 109 109 size_t blob_size); 110 110 111 - int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context, 111 + int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context, 112 112 unsigned int iotlb, HV_PTE pte, 113 113 unsigned int flags); 114 114 115 - int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context, 115 + int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context, 116 116 _gxio_mpipe_link_name_t name, unsigned int flags); 117 117 118 - int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac); 118 + int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac); 119 119 120 - int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac, 120 + int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac, 121 121 uint32_t attr, int64_t 
val); 122 122 123 - int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, 124 - uint64_t * nsec, uint64_t * cycles); 123 + int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec, 124 + uint64_t *nsec, uint64_t *cycles); 125 125 126 - int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec, 126 + int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec, 127 127 uint64_t nsec, uint64_t cycles); 128 128 129 - int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, 129 + int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, 130 130 int64_t nsec); 131 131 132 - int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context, 132 + int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, 133 133 int32_t ppb); 134 134 135 - int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); 135 + int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie); 136 136 137 - int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); 137 + int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie); 138 138 139 - int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base); 139 + int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base); 140 140 141 - int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context, 141 + int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context, 142 142 unsigned long offset, unsigned long size); 143 143 144 144 #endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */
+6 -6
arch/tile/include/gxio/iorpc_mpipe_info.h
··· 33 33 #define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 34 34 35 35 36 - int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context, 36 + int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, 37 37 _gxio_mpipe_link_name_t name); 38 38 39 - int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, 39 + int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, 40 40 unsigned int idx, 41 - _gxio_mpipe_link_name_t * name, 42 - _gxio_mpipe_link_mac_t * mac); 41 + _gxio_mpipe_link_name_t *name, 42 + _gxio_mpipe_link_mac_t *mac); 43 43 44 - int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context, 44 + int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, 45 45 HV_PTE *base); 46 46 47 - int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context, 47 + int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context, 48 48 unsigned long offset, unsigned long size); 49 49 50 50 #endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */
+14 -14
arch/tile/include/gxio/iorpc_trio.h
··· 46 46 #define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 47 47 #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 48 48 49 - int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count, 49 + int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, 50 50 unsigned int first, unsigned int flags); 51 51 52 52 53 - int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context, 53 + int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, 54 54 unsigned int count, unsigned int first, 55 55 unsigned int flags); 56 56 57 57 58 - int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context, 58 + int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, 59 59 unsigned int count, unsigned int first, 60 60 unsigned int flags); 61 61 62 - int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, 62 + int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, 63 63 unsigned int count, unsigned int first, 64 64 unsigned int flags); 65 65 66 - int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context, 66 + int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, 67 67 unsigned int pio_region, unsigned int mac, 68 68 uint32_t bus_address_hi, unsigned int flags); 69 69 70 70 71 - int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context, 71 + int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context, 72 72 unsigned int map, unsigned long va, 73 73 uint64_t size, unsigned int asid, 74 74 unsigned int mac, uint64_t bus_address, 75 75 unsigned int node, 76 76 unsigned int order_mode); 77 77 78 - int gxio_trio_get_port_property(gxio_trio_context_t * context, 78 + int gxio_trio_get_port_property(gxio_trio_context_t *context, 79 79 struct pcie_trio_ports_property *trio_ports); 80 80 81 - int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x, 81 + int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, 
int inter_x, 82 82 int inter_y, int inter_ipi, int inter_event, 83 83 unsigned int mac, unsigned int intx); 84 84 85 - int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x, 85 + int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x, 86 86 int inter_y, int inter_ipi, int inter_event, 87 87 unsigned int mac, unsigned int mem_map, 88 88 uint64_t mem_map_base, uint64_t mem_map_limit, 89 89 unsigned int asid); 90 90 91 91 92 - int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps, 92 + int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps, 93 93 uint16_t mrs, unsigned int mac); 94 94 95 - int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac); 95 + int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac); 96 96 97 - int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac); 97 + int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac); 98 98 99 - int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base); 99 + int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base); 100 100 101 - int gxio_trio_check_mmio_offset(gxio_trio_context_t * context, 101 + int gxio_trio_check_mmio_offset(gxio_trio_context_t *context, 102 102 unsigned long offset, unsigned long size); 103 103 104 104 #endif /* !__GXIO_TRIO_LINUX_RPC_H__ */
+4 -4
arch/tile/include/gxio/iorpc_usb_host.h
··· 31 31 #define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 32 32 #define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 33 33 34 - int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x, 34 + int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, 35 35 int inter_y, int inter_ipi, int inter_event); 36 36 37 - int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context, 37 + int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, 38 38 HV_PTE pte, unsigned int flags); 39 39 40 - int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, 40 + int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, 41 41 HV_PTE *base); 42 42 43 - int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context, 43 + int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context, 44 44 unsigned long offset, unsigned long size); 45 45 46 46 #endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */
+4 -4
arch/tile/include/gxio/usb_host.h
··· 53 53 * @return Zero if the context was successfully initialized, else a 54 54 * GXIO_ERR_xxx error code. 55 55 */ 56 - extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index, 56 + extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, 57 57 int is_ehci); 58 58 59 59 /* Destroy a USB context. ··· 68 68 * @return Zero if the context was successfully destroyed, else a 69 69 * GXIO_ERR_xxx error code. 70 70 */ 71 - extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context); 71 + extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context); 72 72 73 73 /* Retrieve the address of the shim's MMIO registers. 74 74 * 75 75 * @param context Pointer to a properly initialized gxio_usb_host_context_t. 76 76 * @return The address of the shim's MMIO registers. 77 77 */ 78 - extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context); 78 + extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context); 79 79 80 80 /* Retrieve the length of the shim's MMIO registers. 81 81 * 82 82 * @param context Pointer to a properly initialized gxio_usb_host_context_t. 83 83 * @return The length of the shim's MMIO registers. 84 84 */ 85 - extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context); 85 + extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context); 86 86 87 87 #endif /* _GXIO_USB_H_ */
+1 -1
arch/tile/kernel/compat.c
··· 84 84 { 85 85 return sys_llseek(fd, offset_high, offset_low, result, origin); 86 86 } 87 - 87 + 88 88 /* Provide the compat syscall number to call mapping. */ 89 89 #undef __SYSCALL 90 90 #define __SYSCALL(nr, call) [nr] = (call),
-55
arch/tile/kernel/futex_64.S
··· 1 - /* 2 - * Copyright 2011 Tilera Corporation. All Rights Reserved. 3 - * 4 - * This program is free software; you can redistribute it and/or 5 - * modify it under the terms of the GNU General Public License 6 - * as published by the Free Software Foundation, version 2. 7 - * 8 - * This program is distributed in the hope that it will be useful, but 9 - * WITHOUT ANY WARRANTY; without even the implied warranty of 10 - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 - * NON INFRINGEMENT. See the GNU General Public License for 12 - * more details. 13 - * 14 - * Atomically access user memory, but use MMU to avoid propagating 15 - * kernel exceptions. 16 - */ 17 - 18 - #include <linux/linkage.h> 19 - #include <asm/errno.h> 20 - #include <asm/futex.h> 21 - #include <asm/page.h> 22 - #include <asm/processor.h> 23 - 24 - /* 25 - * Provide a set of atomic memory operations supporting <asm/futex.h>. 26 - * 27 - * r0: user address to manipulate 28 - * r1: new value to write, or for cmpxchg, old value to compare against 29 - * r2: (cmpxchg only) new value to write 30 - * 31 - * Return __get_user struct, r0 with value, r1 with error. 32 - */ 33 - #define FUTEX_OP(name, ...) \ 34 - STD_ENTRY(futex_##name) \ 35 - __VA_ARGS__; \ 36 - { \ 37 - move r1, zero; \ 38 - jrp lr \ 39 - }; \ 40 - STD_ENDPROC(futex_##name); \ 41 - .pushsection __ex_table,"a"; \ 42 - .quad 1b, get_user_fault; \ 43 - .popsection 44 - 45 - .pushsection .fixup,"ax" 46 - get_user_fault: 47 - { movei r1, -EFAULT; jrp lr } 48 - ENDPROC(get_user_fault) 49 - .popsection 50 - 51 - FUTEX_OP(cmpxchg, mtspr CMPEXCH_VALUE, r1; 1: cmpexch4 r0, r0, r2) 52 - FUTEX_OP(set, 1: exch4 r0, r0, r1) 53 - FUTEX_OP(add, 1: fetchadd4 r0, r0, r1) 54 - FUTEX_OP(or, 1: fetchor4 r0, r0, r1) 55 - FUTEX_OP(andn, nor r1, r1, zero; 1: fetchand4 r0, r0, r1)
+1 -2
arch/tile/kernel/setup.c
··· 1268 1268 if ((long)VMALLOC_START >= 0) 1269 1269 early_panic( 1270 1270 "Linux VMALLOC region below the 2GB line (%#lx)!\n" 1271 - "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" 1272 - "or smaller VMALLOC_RESERVE.\n", 1271 + "Reconfigure the kernel with smaller VMALLOC_RESERVE.\n", 1273 1272 VMALLOC_START); 1274 1273 #endif 1275 1274 }
+2 -2
arch/tile/kernel/unaligned.c
··· 551 551 /* 552 552 * This function generates unalign fixup JIT. 553 553 * 554 - * We fist find unalign load/store instruction's destination, source 555 - * reguisters: ra, rb and rd. and 3 scratch registers by calling 554 + * We first find unalign load/store instruction's destination, source 555 + * registers: ra, rb and rd. and 3 scratch registers by calling 556 556 * find_regs(...). 3 scratch clobbers should not alias with any register 557 557 * used in the fault bundle. Then analyze the fault bundle to determine 558 558 * if it's a load or store, operand width, branch or address increment etc.
-2
arch/tile/mm/fault.c
··· 149 149 pmd_k = vmalloc_sync_one(pgd, address); 150 150 if (!pmd_k) 151 151 return -1; 152 - if (pmd_huge(*pmd_k)) 153 - return 0; /* support TILE huge_vmap() API */ 154 152 pte_k = pte_offset_kernel(pmd_k, address); 155 153 if (!pte_present(*pte_k)) 156 154 return -1;
-4
arch/tile/mm/init.c
··· 828 828 printk(KERN_DEBUG " PKMAP %#lx - %#lx\n", 829 829 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1); 830 830 #endif 831 - #ifdef CONFIG_HUGEVMAP 832 - printk(KERN_DEBUG " HUGEMAP %#lx - %#lx\n", 833 - HUGE_VMAP_BASE, HUGE_VMAP_END - 1); 834 - #endif 835 831 printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n", 836 832 _VMALLOC_START, _VMALLOC_END - 1); 837 833 #ifdef __tilegx__
+1 -2
arch/tile/mm/pgtable.c
··· 127 127 } 128 128 129 129 /* Shatter the huge page into the preallocated L2 page table. */ 130 - pmd_populate_kernel(&init_mm, pmd, 131 - get_prealloc_pte(pte_pfn(*(pte_t *)pmd))); 130 + pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd))); 132 131 133 132 #ifdef __PAGETABLE_PMD_FOLDED 134 133 /* Walk every pgd on the system and update the pmd there. */
+3 -2
arch/x86/Kconfig
··· 481 481 bool "Intel Low Power Subsystem Support" 482 482 depends on ACPI 483 483 select COMMON_CLK 484 + select PINCTRL 484 485 ---help--- 485 486 Select to build support for Intel Low Power Subsystem such as 486 487 found on Intel Lynxpoint PCH. Selecting this option enables 487 - things like clock tree (common clock framework) which are needed 488 - by the LPSS peripheral drivers. 488 + things like clock tree (common clock framework) and pincontrol 489 + which are needed by the LPSS peripheral drivers. 489 490 490 491 config X86_RDC321X 491 492 bool "RDC R-321x SoC"
+2 -2
arch/x86/kernel/cpu/perf_event_intel.c
··· 899 899 static struct extra_reg intel_slm_extra_regs[] __read_mostly = 900 900 { 901 901 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ 902 - INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0), 903 - INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1), 902 + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0), 903 + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1), 904 904 EVENT_EXTRA_END 905 905 }; 906 906
+1
arch/x86/kernel/cpu/perf_event_intel_ds.c
··· 584 584 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ 585 585 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 586 586 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 587 + INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ 587 588 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ 588 589 EVENT_CONSTRAINT_END 589 590 };
-15
arch/x86/kernel/entry_64.S
··· 487 487 TRACE_IRQS_OFF 488 488 .endm 489 489 490 - ENTRY(save_rest) 491 - PARTIAL_FRAME 1 (REST_SKIP+8) 492 - movq 5*8+16(%rsp), %r11 /* save return address */ 493 - movq_cfi rbx, RBX+16 494 - movq_cfi rbp, RBP+16 495 - movq_cfi r12, R12+16 496 - movq_cfi r13, R13+16 497 - movq_cfi r14, R14+16 498 - movq_cfi r15, R15+16 499 - movq %r11, 8(%rsp) /* return address */ 500 - FIXUP_TOP_OF_STACK %r11, 16 501 - ret 502 - CFI_ENDPROC 503 - END(save_rest) 504 - 505 490 /* save complete stack frame */ 506 491 .pushsection .kprobes.text, "ax" 507 492 ENTRY(save_paranoid)
+2 -1
arch/x86/kernel/smpboot.c
··· 653 653 { 654 654 static int current_node = -1; 655 655 int node = early_cpu_to_node(cpu); 656 + int max_cpu_present = find_last_bit(cpumask_bits(cpu_present_mask), NR_CPUS); 656 657 657 658 if (system_state == SYSTEM_BOOTING) { 658 659 if (node != current_node) { ··· 662 661 current_node = node; 663 662 pr_info("Booting Node %3d, Processors ", node); 664 663 } 665 - pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : ""); 664 + pr_cont(" #%4d%s", cpu, cpu == max_cpu_present ? " OK\n" : ""); 666 665 return; 667 666 } else 668 667 pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+13 -1
arch/x86/kvm/emulate.c
··· 2025 2025 return rc; 2026 2026 } 2027 2027 2028 + static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) 2029 + { 2030 + int rc; 2031 + 2032 + rc = em_ret_far(ctxt); 2033 + if (rc != X86EMUL_CONTINUE) 2034 + return rc; 2035 + rsp_increment(ctxt, ctxt->src.val); 2036 + return X86EMUL_CONTINUE; 2037 + } 2038 + 2028 2039 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) 2029 2040 { 2030 2041 /* Save real source value, then compare EAX against destination. */ ··· 3774 3763 G(ByteOp, group11), G(0, group11), 3775 3764 /* 0xC8 - 0xCF */ 3776 3765 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), 3777 - N, I(ImplicitOps | Stack, em_ret_far), 3766 + I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), 3767 + I(ImplicitOps | Stack, em_ret_far), 3778 3768 D(ImplicitOps), DI(SrcImmByte, intn), 3779 3769 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), 3780 3770 /* 0xD0 - 0xD7 */
+19 -1
arch/x86/kvm/paging_tmpl.h
··· 99 99 pt_element_t prefetch_ptes[PTE_PREFETCH_NUM]; 100 100 gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; 101 101 pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; 102 + bool pte_writable[PT_MAX_FULL_LEVELS]; 102 103 unsigned pt_access; 103 104 unsigned pte_access; 104 105 gfn_t gfn; ··· 236 235 if (pte == orig_pte) 237 236 continue; 238 237 238 + /* 239 + * If the slot is read-only, simply do not process the accessed 240 + * and dirty bits. This is the correct thing to do if the slot 241 + * is ROM, and page tables in read-as-ROM/write-as-MMIO slots 242 + * are only supported if the accessed and dirty bits are already 243 + * set in the ROM (so that MMIO writes are never needed). 244 + * 245 + * Note that NPT does not allow this at all and faults, since 246 + * it always wants nested page table entries for the guest 247 + * page tables to be writable. And EPT works but will simply 248 + * overwrite the read-only memory to set the accessed and dirty 249 + * bits. 250 + */ 251 + if (unlikely(!walker->pte_writable[level - 1])) 252 + continue; 253 + 239 254 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); 240 255 if (ret) 241 256 return ret; ··· 326 309 goto error; 327 310 real_gfn = gpa_to_gfn(real_gfn); 328 311 329 - host_addr = gfn_to_hva(vcpu->kvm, real_gfn); 312 + host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn, 313 + &walker->pte_writable[walker->level - 1]); 330 314 if (unlikely(kvm_is_error_hva(host_addr))) 331 315 goto error; 332 316
+13
arch/x86/kvm/vmx.c
··· 5339 5339 return 0; 5340 5340 } 5341 5341 5342 + /* 5343 + * EPT violation happened while executing iret from NMI, 5344 + * "blocked by NMI" bit has to be set before next VM entry. 5345 + * There are errata that may cause this bit to not be set: 5346 + * AAK134, BY25. 5347 + */ 5348 + if (exit_qualification & INTR_INFO_UNBLOCK_NMI) 5349 + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); 5350 + 5342 5351 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 5343 5352 trace_kvm_page_fault(gpa, exit_qualification); 5344 5353 ··· 7775 7766 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 7776 7767 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 7777 7768 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 7769 + __clear_bit(VCPU_EXREG_PDPTR, 7770 + (unsigned long *)&vcpu->arch.regs_avail); 7771 + __clear_bit(VCPU_EXREG_PDPTR, 7772 + (unsigned long *)&vcpu->arch.regs_dirty); 7778 7773 } 7779 7774 7780 7775 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
+15 -10
block/blk-cgroup.c
··· 235 235 blkg->online = true; 236 236 spin_unlock(&blkcg->lock); 237 237 238 - if (!ret) 238 + if (!ret) { 239 + if (blkcg == &blkcg_root) { 240 + q->root_blkg = blkg; 241 + q->root_rl.blkg = blkg; 242 + } 239 243 return blkg; 244 + } 240 245 241 246 /* @blkg failed fully initialized, use the usual release path */ 242 247 blkg_put(blkg); ··· 340 335 rcu_assign_pointer(blkcg->blkg_hint, NULL); 341 336 342 337 /* 338 + * If root blkg is destroyed. Just clear the pointer since root_rl 339 + * does not take reference on root blkg. 340 + */ 341 + if (blkcg == &blkcg_root) { 342 + blkg->q->root_blkg = NULL; 343 + blkg->q->root_rl.blkg = NULL; 344 + } 345 + 346 + /* 343 347 * Put the reference taken at the time of creation so that when all 344 348 * queues are gone, group can be destroyed. 345 349 */ ··· 374 360 blkg_destroy(blkg); 375 361 spin_unlock(&blkcg->lock); 376 362 } 377 - 378 - /* 379 - * root blkg is destroyed. Just clear the pointer since 380 - * root_rl does not take reference on root blkg. 381 - */ 382 - q->root_blkg = NULL; 383 - q->root_rl.blkg = NULL; 384 363 } 385 364 386 365 /* ··· 977 970 ret = PTR_ERR(blkg); 978 971 goto out_unlock; 979 972 } 980 - q->root_blkg = blkg; 981 - q->root_rl.blkg = blkg; 982 973 983 974 list_for_each_entry(blkg, &q->blkg_list, q_node) 984 975 cnt++;
+2 -4
block/blk-core.c
··· 1549 1549 if (plug) { 1550 1550 /* 1551 1551 * If this is the first request added after a plug, fire 1552 - * of a plug trace. If others have been added before, check 1553 - * if we have multiple devices in this plug. If so, make a 1554 - * note to sort the list before dispatch. 1552 + * of a plug trace. 1555 1553 */ 1556 - if (list_empty(&plug->list)) 1554 + if (!request_count) 1557 1555 trace_block_plug(q); 1558 1556 else { 1559 1557 if (request_count >= BLK_MAX_REQUEST_COUNT) {
+2 -2
block/blk-exec.c
··· 68 68 spin_lock_irq(q->queue_lock); 69 69 70 70 if (unlikely(blk_queue_dying(q))) { 71 + rq->cmd_flags |= REQ_QUIET; 71 72 rq->errors = -ENXIO; 72 - if (rq->end_io) 73 - rq->end_io(rq, rq->errors); 73 + __blk_end_request_all(rq, rq->errors); 74 74 spin_unlock_irq(q->queue_lock); 75 75 return; 76 76 }
+2 -2
block/cfq-iosched.c
··· 1803 1803 1804 1804 if (samples) { 1805 1805 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum); 1806 - do_div(v, samples); 1806 + v = div64_u64(v, samples); 1807 1807 } 1808 1808 __blkg_prfill_u64(sf, pd, v); 1809 1809 return 0; ··· 4358 4358 if (!eq) 4359 4359 return -ENOMEM; 4360 4360 4361 - cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 4361 + cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); 4362 4362 if (!cfqd) { 4363 4363 kobject_put(&eq->kobj); 4364 4364 return -ENOMEM;
+1 -1
block/deadline-iosched.c
··· 346 346 if (!eq) 347 347 return -ENOMEM; 348 348 349 - dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node); 349 + dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node); 350 350 if (!dd) { 351 351 kobject_put(&eq->kobj); 352 352 return -ENOMEM;
+1 -1
block/elevator.c
··· 155 155 { 156 156 struct elevator_queue *eq; 157 157 158 - eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); 158 + eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); 159 159 if (unlikely(!eq)) 160 160 goto err; 161 161
+1 -2
block/genhd.c
··· 1252 1252 { 1253 1253 struct gendisk *disk; 1254 1254 1255 - disk = kmalloc_node(sizeof(struct gendisk), 1256 - GFP_KERNEL | __GFP_ZERO, node_id); 1255 + disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); 1257 1256 if (disk) { 1258 1257 if (!init_part_stats(&disk->part0)) { 1259 1258 kfree(disk);
+1 -12
drivers/atm/he.c
··· 2865 2865 .id_table = he_pci_tbl, 2866 2866 }; 2867 2867 2868 - static int __init he_init(void) 2869 - { 2870 - return pci_register_driver(&he_driver); 2871 - } 2872 - 2873 - static void __exit he_cleanup(void) 2874 - { 2875 - pci_unregister_driver(&he_driver); 2876 - } 2877 - 2878 - module_init(he_init); 2879 - module_exit(he_cleanup); 2868 + module_pci_driver(he_driver);
+1 -1
drivers/atm/nicstar.c
··· 778 778 return error; 779 779 } 780 780 781 - if (mac[i] == NULL || mac_pton(mac[i], card->atmdev->esi)) { 781 + if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) { 782 782 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, 783 783 card->atmdev->esi, 6); 784 784 if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
+7 -5
drivers/bcma/scan.c
··· 269 269 return NULL; 270 270 } 271 271 272 + #define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO) 273 + 272 274 static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, 273 275 struct bcma_device_id *match, int core_num, 274 276 struct bcma_device *core) ··· 353 351 * the main register space for the core 354 352 */ 355 353 tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0); 356 - if (tmp == 0 || IS_ERR_VALUE(tmp)) { 354 + if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) { 357 355 /* Try again to see if it is a bridge */ 358 356 tmp = bcma_erom_get_addr_desc(bus, eromptr, 359 357 SCAN_ADDR_TYPE_BRIDGE, 0); 360 - if (tmp == 0 || IS_ERR_VALUE(tmp)) { 358 + if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) { 361 359 return -EILSEQ; 362 360 } else { 363 361 bcma_info(bus, "Bridge found\n"); ··· 371 369 for (j = 0; ; j++) { 372 370 tmp = bcma_erom_get_addr_desc(bus, eromptr, 373 371 SCAN_ADDR_TYPE_SLAVE, i); 374 - if (IS_ERR_VALUE(tmp)) { 372 + if (IS_ERR_VALUE_U32(tmp)) { 375 373 /* no more entries for port _i_ */ 376 374 /* pr_debug("erom: slave port %d " 377 375 * "has %d descriptors\n", i, j); */ ··· 388 386 for (j = 0; ; j++) { 389 387 tmp = bcma_erom_get_addr_desc(bus, eromptr, 390 388 SCAN_ADDR_TYPE_MWRAP, i); 391 - if (IS_ERR_VALUE(tmp)) { 389 + if (IS_ERR_VALUE_U32(tmp)) { 392 390 /* no more entries for port _i_ */ 393 391 /* pr_debug("erom: master wrapper %d " 394 392 * "has %d descriptors\n", i, j); */ ··· 406 404 for (j = 0; ; j++) { 407 405 tmp = bcma_erom_get_addr_desc(bus, eromptr, 408 406 SCAN_ADDR_TYPE_SWRAP, i + hack); 409 - if (IS_ERR_VALUE(tmp)) { 407 + if (IS_ERR_VALUE_U32(tmp)) { 410 408 /* no more entries for port _i_ */ 411 409 /* pr_debug("erom: master wrapper %d " 412 410 * has %d descriptors\n", i, j); */
+59 -18
drivers/block/rbd.c
··· 931 931 u64 snap_id) 932 932 { 933 933 u32 which; 934 + const char *snap_name; 934 935 935 936 which = rbd_dev_snap_index(rbd_dev, snap_id); 936 937 if (which == BAD_SNAP_INDEX) 937 - return NULL; 938 + return ERR_PTR(-ENOENT); 938 939 939 - return _rbd_dev_v1_snap_name(rbd_dev, which); 940 + snap_name = _rbd_dev_v1_snap_name(rbd_dev, which); 941 + return snap_name ? snap_name : ERR_PTR(-ENOMEM); 940 942 } 941 943 942 944 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) ··· 2814 2812 obj_request_done_set(obj_request); 2815 2813 } 2816 2814 2817 - static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) 2815 + static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id) 2818 2816 { 2819 2817 struct rbd_obj_request *obj_request; 2820 2818 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; ··· 2829 2827 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); 2830 2828 if (!obj_request->osd_req) 2831 2829 goto out; 2832 - obj_request->callback = rbd_obj_request_put; 2833 2830 2834 2831 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, 2835 2832 notify_id, 0, 0); 2836 2833 rbd_osd_req_format_read(obj_request); 2837 2834 2838 2835 ret = rbd_obj_request_submit(osdc, obj_request); 2839 - out: 2840 2836 if (ret) 2841 - rbd_obj_request_put(obj_request); 2837 + goto out; 2838 + ret = rbd_obj_request_wait(obj_request); 2839 + out: 2840 + rbd_obj_request_put(obj_request); 2842 2841 2843 2842 return ret; 2844 2843 } ··· 2859 2856 if (ret) 2860 2857 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret); 2861 2858 2862 - rbd_obj_notify_ack(rbd_dev, notify_id); 2859 + rbd_obj_notify_ack_sync(rbd_dev, notify_id); 2863 2860 } 2864 2861 2865 2862 /* ··· 3331 3328 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 3332 3329 } 3333 3330 3331 + static void rbd_dev_update_size(struct rbd_device *rbd_dev) 3332 + { 3333 + sector_t size; 3334 + bool removing; 3335 + 3336 + /* 3337 
+ * Don't hold the lock while doing disk operations, 3338 + * or lock ordering will conflict with the bdev mutex via: 3339 + * rbd_add() -> blkdev_get() -> rbd_open() 3340 + */ 3341 + spin_lock_irq(&rbd_dev->lock); 3342 + removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); 3343 + spin_unlock_irq(&rbd_dev->lock); 3344 + /* 3345 + * If the device is being removed, rbd_dev->disk has 3346 + * been destroyed, so don't try to update its size 3347 + */ 3348 + if (!removing) { 3349 + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 3350 + dout("setting size to %llu sectors", (unsigned long long)size); 3351 + set_capacity(rbd_dev->disk, size); 3352 + revalidate_disk(rbd_dev->disk); 3353 + } 3354 + } 3355 + 3334 3356 static int rbd_dev_refresh(struct rbd_device *rbd_dev) 3335 3357 { 3336 3358 u64 mapping_size; ··· 3375 3347 up_write(&rbd_dev->header_rwsem); 3376 3348 3377 3349 if (mapping_size != rbd_dev->mapping.size) { 3378 - sector_t size; 3379 - 3380 - size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 3381 - dout("setting size to %llu sectors", (unsigned long long)size); 3382 - set_capacity(rbd_dev->disk, size); 3383 - revalidate_disk(rbd_dev->disk); 3350 + rbd_dev_update_size(rbd_dev); 3384 3351 } 3385 3352 3386 3353 return ret; ··· 4084 4061 4085 4062 snap_id = snapc->snaps[which]; 4086 4063 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); 4087 - if (IS_ERR(snap_name)) 4088 - break; 4064 + if (IS_ERR(snap_name)) { 4065 + /* ignore no-longer existing snapshots */ 4066 + if (PTR_ERR(snap_name) == -ENOENT) 4067 + continue; 4068 + else 4069 + break; 4070 + } 4089 4071 found = !strcmp(name, snap_name); 4090 4072 kfree(snap_name); 4091 4073 } ··· 4169 4141 /* Look up the snapshot name, and make a copy */ 4170 4142 4171 4143 snap_name = rbd_snap_name(rbd_dev, spec->snap_id); 4172 - if (!snap_name) { 4173 - ret = -ENOMEM; 4144 + if (IS_ERR(snap_name)) { 4145 + ret = PTR_ERR(snap_name); 4174 4146 goto out_err; 4175 4147 } 4176 4148 ··· 5191 5163 if (ret < 0 
|| already) 5192 5164 return ret; 5193 5165 5194 - rbd_bus_del_dev(rbd_dev); 5195 5166 ret = rbd_dev_header_watch_sync(rbd_dev, false); 5196 5167 if (ret) 5197 5168 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); 5169 + 5170 + /* 5171 + * flush remaining watch callbacks - these must be complete 5172 + * before the osd_client is shutdown 5173 + */ 5174 + dout("%s: flushing notifies", __func__); 5175 + ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); 5176 + /* 5177 + * Don't free anything from rbd_dev->disk until after all 5178 + * notifies are completely processed. Otherwise 5179 + * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting 5180 + * in a potential use after free of rbd_dev->disk or rbd_dev. 5181 + */ 5182 + rbd_bus_del_dev(rbd_dev); 5198 5183 rbd_dev_image_release(rbd_dev); 5199 5184 module_put(THIS_MODULE); 5200 5185
+6 -1
drivers/cpufreq/cpufreq-cpu0.c
··· 12 12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 13 14 14 #include <linux/clk.h> 15 + #include <linux/cpu.h> 15 16 #include <linux/cpufreq.h> 16 17 #include <linux/err.h> 17 18 #include <linux/module.h> ··· 178 177 struct device_node *np; 179 178 int ret; 180 179 181 - cpu_dev = &pdev->dev; 180 + cpu_dev = get_cpu_device(0); 181 + if (!cpu_dev) { 182 + pr_err("failed to get cpu0 device\n"); 183 + return -ENODEV; 184 + } 182 185 183 186 np = of_node_get(cpu_dev->of_node); 184 187 if (!np) {
+20 -11
drivers/cpufreq/cpufreq.c
··· 952 952 if (cpu == policy->cpu) 953 953 return; 954 954 955 + /* 956 + * Take direct locks as lock_policy_rwsem_write wouldn't work here. 957 + * Also lock for last cpu is enough here as contention will happen only 958 + * after policy->cpu is changed and after it is changed, other threads 959 + * will try to acquire lock for new cpu. And policy is already updated 960 + * by then. 961 + */ 962 + down_write(&per_cpu(cpu_policy_rwsem, policy->cpu)); 963 + 955 964 policy->last_cpu = policy->cpu; 956 965 policy->cpu = cpu; 966 + 967 + up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu)); 957 968 958 969 #ifdef CONFIG_CPU_FREQ_TABLE 959 970 cpufreq_frequency_table_update_policy_cpu(policy); ··· 1136 1125 int ret; 1137 1126 1138 1127 /* first sibling now owns the new sysfs dir */ 1139 - cpu_dev = get_cpu_device(cpumask_first(policy->cpus)); 1128 + cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu)); 1140 1129 1141 1130 /* Don't touch sysfs files during light-weight tear-down */ 1142 1131 if (frozen) ··· 1200 1189 policy->governor->name, CPUFREQ_NAME_LEN); 1201 1190 #endif 1202 1191 1203 - WARN_ON(lock_policy_rwsem_write(cpu)); 1192 + lock_policy_rwsem_read(cpu); 1204 1193 cpus = cpumask_weight(policy->cpus); 1205 - 1206 - if (cpus > 1) 1207 - cpumask_clear_cpu(cpu, policy->cpus); 1208 - unlock_policy_rwsem_write(cpu); 1194 + unlock_policy_rwsem_read(cpu); 1209 1195 1210 1196 if (cpu != policy->cpu) { 1211 1197 if (!frozen) ··· 1211 1203 1212 1204 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen); 1213 1205 if (new_cpu >= 0) { 1214 - WARN_ON(lock_policy_rwsem_write(cpu)); 1215 1206 update_policy_cpu(policy, new_cpu); 1216 - unlock_policy_rwsem_write(cpu); 1217 1207 1218 1208 if (!frozen) { 1219 1209 pr_debug("%s: policy Kobject moved to cpu: %d " ··· 1243 1237 return -EINVAL; 1244 1238 } 1245 1239 1246 - lock_policy_rwsem_read(cpu); 1240 + WARN_ON(lock_policy_rwsem_write(cpu)); 1247 1241 cpus = cpumask_weight(policy->cpus); 1248 - 
unlock_policy_rwsem_read(cpu); 1242 + 1243 + if (cpus > 1) 1244 + cpumask_clear_cpu(cpu, policy->cpus); 1245 + unlock_policy_rwsem_write(cpu); 1249 1246 1250 1247 /* If cpu is last user of policy, free policy */ 1251 1248 if (cpus == 1) { ··· 2104 2095 write_lock_irqsave(&cpufreq_driver_lock, flags); 2105 2096 if (cpufreq_driver) { 2106 2097 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2107 - return -EBUSY; 2098 + return -EEXIST; 2108 2099 } 2109 2100 cpufreq_driver = driver_data; 2110 2101 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+6 -1
drivers/cpufreq/imx6q-cpufreq.c
··· 7 7 */ 8 8 9 9 #include <linux/clk.h> 10 + #include <linux/cpu.h> 10 11 #include <linux/cpufreq.h> 11 12 #include <linux/delay.h> 12 13 #include <linux/err.h> ··· 203 202 unsigned long min_volt, max_volt; 204 203 int num, ret; 205 204 206 - cpu_dev = &pdev->dev; 205 + cpu_dev = get_cpu_device(0); 206 + if (!cpu_dev) { 207 + pr_err("failed to get cpu0 device\n"); 208 + return -ENODEV; 209 + } 207 210 208 211 np = of_node_get(cpu_dev->of_node); 209 212 if (!np) {
+1 -1
drivers/gpu/drm/ast/ast_drv.h
··· 177 177 178 178 static inline void ast_open_key(struct ast_private *ast) 179 179 { 180 - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04); 180 + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); 181 181 } 182 182 183 183 #define AST_VIDMEM_SIZE_8M 0x00800000
+8 -65
drivers/gpu/drm/drm_context.c
··· 42 42 43 43 #include <drm/drmP.h> 44 44 45 + /******************************************************************/ 46 + /** \name Context bitmap support */ 47 + /*@{*/ 48 + 45 49 /** 46 50 * Free a handle from the context bitmap. 47 51 * ··· 56 52 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex 57 53 * lock. 58 54 */ 59 - static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 55 + void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle) 60 56 { 61 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 62 - return; 63 - 64 57 mutex_lock(&dev->struct_mutex); 65 58 idr_remove(&dev->ctx_idr, ctx_handle); 66 59 mutex_unlock(&dev->struct_mutex); 67 - } 68 - 69 - /******************************************************************/ 70 - /** \name Context bitmap support */ 71 - /*@{*/ 72 - 73 - void drm_legacy_ctxbitmap_release(struct drm_device *dev, 74 - struct drm_file *file_priv) 75 - { 76 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 77 - return; 78 - 79 - mutex_lock(&dev->ctxlist_mutex); 80 - if (!list_empty(&dev->ctxlist)) { 81 - struct drm_ctx_list *pos, *n; 82 - 83 - list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 84 - if (pos->tag == file_priv && 85 - pos->handle != DRM_KERNEL_CONTEXT) { 86 - if (dev->driver->context_dtor) 87 - dev->driver->context_dtor(dev, 88 - pos->handle); 89 - 90 - drm_ctxbitmap_free(dev, pos->handle); 91 - 92 - list_del(&pos->head); 93 - kfree(pos); 94 - --dev->ctx_count; 95 - } 96 - } 97 - } 98 - mutex_unlock(&dev->ctxlist_mutex); 99 60 } 100 61 101 62 /** ··· 90 121 * 91 122 * Initialise the drm_device::ctx_idr 92 123 */ 93 - void drm_legacy_ctxbitmap_init(struct drm_device * dev) 124 + int drm_ctxbitmap_init(struct drm_device * dev) 94 125 { 95 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 96 - return; 97 - 98 126 idr_init(&dev->ctx_idr); 127 + return 0; 99 128 } 100 129 101 130 /** ··· 104 137 * Free all idr members using drm_ctx_sarea_free helper function 105 138 * while 
holding the drm_device::struct_mutex lock. 106 139 */ 107 - void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) 140 + void drm_ctxbitmap_cleanup(struct drm_device * dev) 108 141 { 109 142 mutex_lock(&dev->struct_mutex); 110 143 idr_destroy(&dev->ctx_idr); ··· 135 168 struct drm_ctx_priv_map *request = data; 136 169 struct drm_local_map *map; 137 170 struct drm_map_list *_entry; 138 - 139 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 140 - return -EINVAL; 141 171 142 172 mutex_lock(&dev->struct_mutex); 143 173 ··· 179 215 struct drm_ctx_priv_map *request = data; 180 216 struct drm_local_map *map = NULL; 181 217 struct drm_map_list *r_list = NULL; 182 - 183 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 184 - return -EINVAL; 185 218 186 219 mutex_lock(&dev->struct_mutex); 187 220 list_for_each_entry(r_list, &dev->maplist, head) { ··· 280 319 struct drm_ctx ctx; 281 320 int i; 282 321 283 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 284 - return -EINVAL; 285 - 286 322 if (res->count >= DRM_RESERVED_CONTEXTS) { 287 323 memset(&ctx, 0, sizeof(ctx)); 288 324 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ··· 309 351 { 310 352 struct drm_ctx_list *ctx_entry; 311 353 struct drm_ctx *ctx = data; 312 - 313 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 314 - return -EINVAL; 315 354 316 355 ctx->handle = drm_ctxbitmap_next(dev); 317 356 if (ctx->handle == DRM_KERNEL_CONTEXT) { ··· 353 398 { 354 399 struct drm_ctx *ctx = data; 355 400 356 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 357 - return -EINVAL; 358 - 359 401 /* This is 0, because we don't handle any context flags */ 360 402 ctx->flags = 0; 361 403 ··· 375 423 { 376 424 struct drm_ctx *ctx = data; 377 425 378 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 379 - return -EINVAL; 380 - 381 426 DRM_DEBUG("%d\n", ctx->handle); 382 427 return drm_context_switch(dev, dev->last_context, ctx->handle); 383 428 } ··· 394 445 struct drm_file *file_priv) 395 446 { 396 447 struct drm_ctx *ctx = 
data; 397 - 398 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 399 - return -EINVAL; 400 448 401 449 DRM_DEBUG("%d\n", ctx->handle); 402 450 drm_context_switch_complete(dev, file_priv, ctx->handle); ··· 416 470 struct drm_file *file_priv) 417 471 { 418 472 struct drm_ctx *ctx = data; 419 - 420 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 421 - return -EINVAL; 422 473 423 474 DRM_DEBUG("%d\n", ctx->handle); 424 475 if (ctx->handle != DRM_KERNEL_CONTEXT) {
+8
drivers/gpu/drm/drm_fb_helper.c
··· 416 416 return; 417 417 418 418 /* 419 + * fbdev->blank can be called from irq context in case of a panic. 420 + * Since we already have our own special panic handler which will 421 + * restore the fbdev console mode completely, just bail out early. 422 + */ 423 + if (oops_in_progress) 424 + return; 425 + 426 + /* 419 427 * For each CRTC in this fb, turn the connectors on/off. 420 428 */ 421 429 drm_modeset_lock_all(dev);
+20 -1
drivers/gpu/drm/drm_fops.c
··· 439 439 if (dev->driver->driver_features & DRIVER_GEM) 440 440 drm_gem_release(dev, file_priv); 441 441 442 - drm_legacy_ctxbitmap_release(dev, file_priv); 442 + mutex_lock(&dev->ctxlist_mutex); 443 + if (!list_empty(&dev->ctxlist)) { 444 + struct drm_ctx_list *pos, *n; 445 + 446 + list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { 447 + if (pos->tag == file_priv && 448 + pos->handle != DRM_KERNEL_CONTEXT) { 449 + if (dev->driver->context_dtor) 450 + dev->driver->context_dtor(dev, 451 + pos->handle); 452 + 453 + drm_ctxbitmap_free(dev, pos->handle); 454 + 455 + list_del(&pos->head); 456 + kfree(pos); 457 + --dev->ctx_count; 458 + } 459 + } 460 + } 461 + mutex_unlock(&dev->ctxlist_mutex); 443 462 444 463 mutex_lock(&dev->struct_mutex); 445 464
+8 -2
drivers/gpu/drm/drm_stub.c
··· 292 292 goto error_out_unreg; 293 293 } 294 294 295 - drm_legacy_ctxbitmap_init(dev); 295 + 296 + 297 + retcode = drm_ctxbitmap_init(dev); 298 + if (retcode) { 299 + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); 300 + goto error_out_unreg; 301 + } 296 302 297 303 if (driver->driver_features & DRIVER_GEM) { 298 304 retcode = drm_gem_init(dev); ··· 452 446 drm_rmmap(dev, r_list->map); 453 447 drm_ht_remove(&dev->map_hash); 454 448 455 - drm_legacy_ctxbitmap_cleanup(dev); 449 + drm_ctxbitmap_cleanup(dev); 456 450 457 451 if (drm_core_check_feature(dev, DRIVER_MODESET)) 458 452 drm_put_minor(&dev->control);
+1 -1
drivers/gpu/drm/exynos/Kconfig
··· 56 56 57 57 config DRM_EXYNOS_FIMC 58 58 bool "Exynos DRM FIMC" 59 - depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF 59 + depends on DRM_EXYNOS_IPP && MFD_SYSCON 60 60 help 61 61 Choose this option if you want to use Exynos FIMC for DRM. 62 62
+4 -3
drivers/gpu/drm/exynos/exynos_drm_buf.c
··· 63 63 return -ENOMEM; 64 64 } 65 65 66 - buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size, 66 + buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev, 67 + buf->size, 67 68 &buf->dma_addr, GFP_KERNEL, 68 69 &buf->dma_attrs); 69 70 if (!buf->kvaddr) { ··· 91 90 } 92 91 93 92 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages); 94 - if (!buf->sgt) { 93 + if (IS_ERR(buf->sgt)) { 95 94 DRM_ERROR("failed to get sg table.\n"); 96 - ret = -ENOMEM; 95 + ret = PTR_ERR(buf->sgt); 97 96 goto err_free_attrs; 98 97 } 99 98
+3 -2
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 99 99 if (is_drm_iommu_supported(dev)) { 100 100 unsigned int nr_pages = buffer->size >> PAGE_SHIFT; 101 101 102 - buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP, 102 + buffer->kvaddr = (void __iomem *) vmap(buffer->pages, 103 + nr_pages, VM_MAP, 103 104 pgprot_writecombine(PAGE_KERNEL)); 104 105 } else { 105 106 phys_addr_t dma_addr = buffer->dma_addr; 106 107 if (dma_addr) 107 - buffer->kvaddr = phys_to_virt(dma_addr); 108 + buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr); 108 109 else 109 110 buffer->kvaddr = (void __iomem *)NULL; 110 111 }
+4 -7
drivers/gpu/drm/i915/i915_gem.c
··· 1392 1392 if (i915_terminally_wedged(&dev_priv->gpu_error)) 1393 1393 return VM_FAULT_SIGBUS; 1394 1394 case -EAGAIN: 1395 - /* Give the error handler a chance to run and move the 1396 - * objects off the GPU active list. Next time we service the 1397 - * fault, we should be able to transition the page into the 1398 - * GTT without touching the GPU (and so avoid further 1399 - * EIO/EGAIN). If the GPU is wedged, then there is no issue 1400 - * with coherency, just lost writes. 1395 + /* 1396 + * EAGAIN means the gpu is hung and we'll wait for the error 1397 + * handler to reset everything when re-faulting in 1398 + * i915_mutex_lock_interruptible. 1401 1399 */ 1402 - set_need_resched(); 1403 1400 case 0: 1404 1401 case -ERESTARTSYS: 1405 1402 case -EINTR:
+54 -14
drivers/gpu/drm/i915/i915_irq.c
··· 1469 1469 return ret; 1470 1470 } 1471 1471 1472 + static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1473 + bool reset_completed) 1474 + { 1475 + struct intel_ring_buffer *ring; 1476 + int i; 1477 + 1478 + /* 1479 + * Notify all waiters for GPU completion events that reset state has 1480 + * been changed, and that they need to restart their wait after 1481 + * checking for potential errors (and bail out to drop locks if there is 1482 + * a gpu reset pending so that i915_error_work_func can acquire them). 1483 + */ 1484 + 1485 + /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 1486 + for_each_ring(ring, dev_priv, i) 1487 + wake_up_all(&ring->irq_queue); 1488 + 1489 + /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 1490 + wake_up_all(&dev_priv->pending_flip_queue); 1491 + 1492 + /* 1493 + * Signal tasks blocked in i915_gem_wait_for_error that the pending 1494 + * reset state is cleared. 1495 + */ 1496 + if (reset_completed) 1497 + wake_up_all(&dev_priv->gpu_error.reset_queue); 1498 + } 1499 + 1472 1500 /** 1473 1501 * i915_error_work_func - do process context error handling work 1474 1502 * @work: work struct ··· 1511 1483 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1512 1484 gpu_error); 1513 1485 struct drm_device *dev = dev_priv->dev; 1514 - struct intel_ring_buffer *ring; 1515 1486 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1516 1487 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1517 1488 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1518 - int i, ret; 1489 + int ret; 1519 1490 1520 1491 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 1521 1492 ··· 1533 1506 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 1534 1507 reset_event); 1535 1508 1509 + /* 1510 + * All state reset _must_ be completed before we update the 1511 + * reset counter, for otherwise waiters might miss the reset 1512 + * pending state and 
not properly drop locks, resulting in 1513 + * deadlocks with the reset work. 1514 + */ 1536 1515 ret = i915_reset(dev); 1516 + 1517 + intel_display_handle_reset(dev); 1537 1518 1538 1519 if (ret == 0) { 1539 1520 /* ··· 1563 1528 atomic_set(&error->reset_counter, I915_WEDGED); 1564 1529 } 1565 1530 1566 - for_each_ring(ring, dev_priv, i) 1567 - wake_up_all(&ring->irq_queue); 1568 - 1569 - intel_display_handle_reset(dev); 1570 - 1571 - wake_up_all(&dev_priv->gpu_error.reset_queue); 1531 + /* 1532 + * Note: The wake_up also serves as a memory barrier so that 1533 + * waiters see the update value of the reset counter atomic_t. 1534 + */ 1535 + i915_error_wake_up(dev_priv, true); 1572 1536 } 1573 1537 } 1574 1538 ··· 1676 1642 void i915_handle_error(struct drm_device *dev, bool wedged) 1677 1643 { 1678 1644 struct drm_i915_private *dev_priv = dev->dev_private; 1679 - struct intel_ring_buffer *ring; 1680 - int i; 1681 1645 1682 1646 i915_capture_error_state(dev); 1683 1647 i915_report_and_clear_eir(dev); ··· 1685 1653 &dev_priv->gpu_error.reset_counter); 1686 1654 1687 1655 /* 1688 - * Wakeup waiting processes so that the reset work item 1689 - * doesn't deadlock trying to grab various locks. 1656 + * Wakeup waiting processes so that the reset work function 1657 + * i915_error_work_func doesn't deadlock trying to grab various 1658 + * locks. By bumping the reset counter first, the woken 1659 + * processes will see a reset in progress and back off, 1660 + * releasing their locks and then wait for the reset completion. 1661 + * We must do this for _all_ gpu waiters that might hold locks 1662 + * that the reset work needs to acquire. 1663 + * 1664 + * Note: The wake_up serves as the required memory barrier to 1665 + * ensure that the waiters see the updated value of the reset 1666 + * counter atomic_t. 1690 1667 */ 1691 - for_each_ring(ring, dev_priv, i) 1692 - wake_up_all(&ring->irq_queue); 1668 + i915_error_wake_up(dev_priv, false); 1693 1669 } 1694 1670 1695 1671 /*
+1 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 778 778 /* Can only use the always-on power well for eDP when 779 779 * not using the panel fitter, and when not using motion 780 780 * blur mitigation (which we don't support). */ 781 - if (intel_crtc->config.pch_pfit.size) 781 + if (intel_crtc->config.pch_pfit.enabled) 782 782 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 783 783 else 784 784 temp |= TRANS_DDI_EDP_INPUT_A_ON;
+20 -22
drivers/gpu/drm/i915/intel_display.c
··· 2249 2249 I915_WRITE(PIPESRC(intel_crtc->pipe), 2250 2250 ((crtc->mode.hdisplay - 1) << 16) | 2251 2251 (crtc->mode.vdisplay - 1)); 2252 - if (!intel_crtc->config.pch_pfit.size && 2252 + if (!intel_crtc->config.pch_pfit.enabled && 2253 2253 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2254 2254 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2255 2255 I915_WRITE(PF_CTL(intel_crtc->pipe), 0); ··· 3203 3203 struct drm_i915_private *dev_priv = dev->dev_private; 3204 3204 int pipe = crtc->pipe; 3205 3205 3206 - if (crtc->config.pch_pfit.size) { 3206 + if (crtc->config.pch_pfit.enabled) { 3207 3207 /* Force use of hard-coded filter coefficients 3208 3208 * as some pre-programmed values are broken, 3209 3209 * e.g. x201. ··· 3428 3428 3429 3429 /* To avoid upsetting the power well on haswell only disable the pfit if 3430 3430 * it's in use. The hw state code will make sure we get this right. */ 3431 - if (crtc->config.pch_pfit.size) { 3431 + if (crtc->config.pch_pfit.enabled) { 3432 3432 I915_WRITE(PF_CTL(pipe), 0); 3433 3433 I915_WRITE(PF_WIN_POS(pipe), 0); 3434 3434 I915_WRITE(PF_WIN_SZ(pipe), 0); ··· 4877 4877 return -EINVAL; 4878 4878 } 4879 4879 4880 - /* Ensure that the cursor is valid for the new mode before changing... */ 4881 - intel_crtc_update_cursor(crtc, true); 4882 - 4883 4880 if (is_lvds && dev_priv->lvds_downclock_avail) { 4884 4881 /* 4885 4882 * Ensure we match the reduced clock's P to the target clock. ··· 5765 5768 intel_crtc->config.dpll.p2 = clock.p2; 5766 5769 } 5767 5770 5768 - /* Ensure that the cursor is valid for the new mode before changing... */ 5769 - intel_crtc_update_cursor(crtc, true); 5770 - 5771 5771 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
*/ 5772 5772 if (intel_crtc->config.has_pch_encoder) { 5773 5773 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); ··· 5853 5859 tmp = I915_READ(PF_CTL(crtc->pipe)); 5854 5860 5855 5861 if (tmp & PF_ENABLE) { 5862 + pipe_config->pch_pfit.enabled = true; 5856 5863 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 5857 5864 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 5858 5865 ··· 6231 6236 if (!crtc->base.enabled) 6232 6237 continue; 6233 6238 6234 - if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.size || 6239 + if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled || 6235 6240 crtc->config.cpu_transcoder != TRANSCODER_EDP) 6236 6241 enable = true; 6237 6242 } ··· 6253 6258 6254 6259 if (!intel_ddi_pll_mode_set(crtc)) 6255 6260 return -EINVAL; 6256 - 6257 - /* Ensure that the cursor is valid for the new mode before changing... */ 6258 - intel_crtc_update_cursor(crtc, true); 6259 6261 6260 6262 if (intel_crtc->config.has_dp_encoder) 6261 6263 intel_dp_set_m_n(intel_crtc); ··· 6486 6494 6487 6495 /* Set ELD valid state */ 6488 6496 tmp = I915_READ(aud_cntrl_st2); 6489 - DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); 6497 + DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp); 6490 6498 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); 6491 6499 I915_WRITE(aud_cntrl_st2, tmp); 6492 6500 tmp = I915_READ(aud_cntrl_st2); 6493 - DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); 6501 + DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp); 6494 6502 6495 6503 /* Enable HDMI mode */ 6496 6504 tmp = I915_READ(aud_config); 6497 - DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); 6505 + DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp); 6498 6506 /* clear N_programing_enable and N_value_index */ 6499 6507 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); 6500 6508 I915_WRITE(aud_config, tmp); ··· 6929 6937 intel_crtc->cursor_width = width; 6930 6938 
intel_crtc->cursor_height = height; 6931 6939 6932 - intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6940 + if (intel_crtc->active) 6941 + intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6933 6942 6934 6943 return 0; 6935 6944 fail_unpin: ··· 6949 6956 intel_crtc->cursor_x = x; 6950 6957 intel_crtc->cursor_y = y; 6951 6958 6952 - intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6959 + if (intel_crtc->active) 6960 + intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 6953 6961 6954 6962 return 0; 6955 6963 } ··· 8199 8205 pipe_config->gmch_pfit.control, 8200 8206 pipe_config->gmch_pfit.pgm_ratios, 8201 8207 pipe_config->gmch_pfit.lvds_border_bits); 8202 - DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x\n", 8208 + DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", 8203 8209 pipe_config->pch_pfit.pos, 8204 - pipe_config->pch_pfit.size); 8210 + pipe_config->pch_pfit.size, 8211 + pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); 8205 8212 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 8206 8213 } 8207 8214 ··· 8598 8603 if (INTEL_INFO(dev)->gen < 4) 8599 8604 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 8600 8605 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 8601 - PIPE_CONF_CHECK_I(pch_pfit.pos); 8602 - PIPE_CONF_CHECK_I(pch_pfit.size); 8606 + PIPE_CONF_CHECK_I(pch_pfit.enabled); 8607 + if (current_config->pch_pfit.enabled) { 8608 + PIPE_CONF_CHECK_I(pch_pfit.pos); 8609 + PIPE_CONF_CHECK_I(pch_pfit.size); 8610 + } 8603 8611 8604 8612 PIPE_CONF_CHECK_I(ips_enabled); 8605 8613
+1
drivers/gpu/drm/i915/intel_drv.h
··· 280 280 struct { 281 281 u32 pos; 282 282 u32 size; 283 + bool enabled; 283 284 } pch_pfit; 284 285 285 286 /* FDI configuration, only valid if has_pch_encoder is set. */
+2
drivers/gpu/drm/i915/intel_dvo.c
··· 263 263 C(vtotal); 264 264 C(clock); 265 265 #undef C 266 + 267 + drm_mode_set_crtcinfo(adjusted_mode, 0); 266 268 } 267 269 268 270 if (intel_dvo->dev.dev_ops->mode_fixup)
+1
drivers/gpu/drm/i915/intel_panel.c
··· 112 112 done: 113 113 pipe_config->pch_pfit.pos = (x << 16) | y; 114 114 pipe_config->pch_pfit.size = (width << 16) | height; 115 + pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0; 115 116 } 116 117 117 118 static void
+3 -3
drivers/gpu/drm/i915/intel_pm.c
··· 2096 2096 struct drm_crtc *crtc) 2097 2097 { 2098 2098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2099 - uint32_t pixel_rate, pfit_size; 2099 + uint32_t pixel_rate; 2100 2100 2101 2101 pixel_rate = intel_crtc->config.adjusted_mode.clock; 2102 2102 2103 2103 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to 2104 2104 * adjust the pixel_rate here. */ 2105 2105 2106 - pfit_size = intel_crtc->config.pch_pfit.size; 2107 - if (pfit_size) { 2106 + if (intel_crtc->config.pch_pfit.enabled) { 2108 2107 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 2108 + uint32_t pfit_size = intel_crtc->config.pch_pfit.size; 2109 2109 2110 2110 pipe_w = intel_crtc->config.requested_mode.hdisplay; 2111 2111 pipe_h = intel_crtc->config.requested_mode.vdisplay;
+35 -26
drivers/gpu/drm/i915/intel_sdvo.c
··· 788 788 uint16_t h_sync_offset, v_sync_offset; 789 789 int mode_clock; 790 790 791 + memset(dtd, 0, sizeof(*dtd)); 792 + 791 793 width = mode->hdisplay; 792 794 height = mode->vdisplay; 793 795 ··· 832 830 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 833 831 dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE; 834 832 835 - dtd->part2.sdvo_flags = 0; 836 833 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; 837 - dtd->part2.reserved = 0; 838 834 } 839 835 840 - static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, 836 + static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *pmode, 841 837 const struct intel_sdvo_dtd *dtd) 842 838 { 843 - mode->hdisplay = dtd->part1.h_active; 844 - mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; 845 - mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off; 846 - mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; 847 - mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width; 848 - mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; 849 - mode->htotal = mode->hdisplay + dtd->part1.h_blank; 850 - mode->htotal += (dtd->part1.h_high & 0xf) << 8; 839 + struct drm_display_mode mode = {}; 851 840 852 - mode->vdisplay = dtd->part1.v_active; 853 - mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; 854 - mode->vsync_start = mode->vdisplay; 855 - mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; 856 - mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; 857 - mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0; 858 - mode->vsync_end = mode->vsync_start + 841 + mode.hdisplay = dtd->part1.h_active; 842 + mode.hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; 843 + mode.hsync_start = mode.hdisplay + dtd->part2.h_sync_off; 844 + mode.hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2; 845 + mode.hsync_end = mode.hsync_start + dtd->part2.h_sync_width; 846 + mode.hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4; 847 + 
mode.htotal = mode.hdisplay + dtd->part1.h_blank; 848 + mode.htotal += (dtd->part1.h_high & 0xf) << 8; 849 + 850 + mode.vdisplay = dtd->part1.v_active; 851 + mode.vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8; 852 + mode.vsync_start = mode.vdisplay; 853 + mode.vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf; 854 + mode.vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2; 855 + mode.vsync_start += dtd->part2.v_sync_off_high & 0xc0; 856 + mode.vsync_end = mode.vsync_start + 859 857 (dtd->part2.v_sync_off_width & 0xf); 860 - mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; 861 - mode->vtotal = mode->vdisplay + dtd->part1.v_blank; 862 - mode->vtotal += (dtd->part1.v_high & 0xf) << 8; 858 + mode.vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4; 859 + mode.vtotal = mode.vdisplay + dtd->part1.v_blank; 860 + mode.vtotal += (dtd->part1.v_high & 0xf) << 8; 863 861 864 - mode->clock = dtd->part1.clock * 10; 862 + mode.clock = dtd->part1.clock * 10; 865 863 866 - mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); 867 864 if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE) 868 - mode->flags |= DRM_MODE_FLAG_INTERLACE; 865 + mode.flags |= DRM_MODE_FLAG_INTERLACE; 869 866 if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) 870 - mode->flags |= DRM_MODE_FLAG_PHSYNC; 867 + mode.flags |= DRM_MODE_FLAG_PHSYNC; 868 + else 869 + mode.flags |= DRM_MODE_FLAG_NHSYNC; 871 870 if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) 872 - mode->flags |= DRM_MODE_FLAG_PVSYNC; 871 + mode.flags |= DRM_MODE_FLAG_PVSYNC; 872 + else 873 + mode.flags |= DRM_MODE_FLAG_NVSYNC; 874 + 875 + drm_mode_set_crtcinfo(&mode, 0); 876 + 877 + drm_mode_copy(pmode, &mode); 873 878 } 874 879 875 880 static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+9 -1
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 124 124 125 125 /* reset completed fence seqno, just discard anything pending: */ 126 126 adreno_gpu->memptrs->fence = gpu->submitted_fence; 127 + adreno_gpu->memptrs->rptr = 0; 128 + adreno_gpu->memptrs->wptr = 0; 127 129 128 130 gpu->funcs->pm_resume(gpu); 129 131 ret = gpu->funcs->hw_init(gpu); ··· 231 229 return; 232 230 } while(time_before(jiffies, t)); 233 231 234 - DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name); 232 + DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); 235 233 236 234 /* TODO maybe we need to reset GPU here to recover from hang? */ 237 235 } ··· 258 256 { 259 257 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 260 258 uint32_t freedwords; 259 + unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT; 261 260 do { 262 261 uint32_t size = gpu->rb->size / 4; 263 262 uint32_t wptr = get_wptr(gpu->rb); 264 263 uint32_t rptr = adreno_gpu->memptrs->rptr; 265 264 freedwords = (rptr + (size - 1) - wptr) % size; 265 + 266 + if (time_after(jiffies, t)) { 267 + DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name); 268 + break; 269 + } 266 270 } while(freedwords < ndwords); 267 271 } 268 272
+32 -16
drivers/gpu/drm/msm/msm_drv.c
··· 499 499 struct timespec *timeout) 500 500 { 501 501 struct msm_drm_private *priv = dev->dev_private; 502 - unsigned long timeout_jiffies = timespec_to_jiffies(timeout); 503 - unsigned long start_jiffies = jiffies; 504 - unsigned long remaining_jiffies; 505 502 int ret; 506 503 507 - if (time_after(start_jiffies, timeout_jiffies)) 508 - remaining_jiffies = 0; 509 - else 510 - remaining_jiffies = timeout_jiffies - start_jiffies; 504 + if (!priv->gpu) 505 + return 0; 511 506 512 - ret = wait_event_interruptible_timeout(priv->fence_event, 513 - priv->completed_fence >= fence, 514 - remaining_jiffies); 515 - if (ret == 0) { 516 - DBG("timeout waiting for fence: %u (completed: %u)", 517 - fence, priv->completed_fence); 518 - ret = -ETIMEDOUT; 519 - } else if (ret != -ERESTARTSYS) { 520 - ret = 0; 507 + if (fence > priv->gpu->submitted_fence) { 508 + DRM_ERROR("waiting on invalid fence: %u (of %u)\n", 509 + fence, priv->gpu->submitted_fence); 510 + return -EINVAL; 511 + } 512 + 513 + if (!timeout) { 514 + /* no-wait: */ 515 + ret = fence_completed(dev, fence) ? 0 : -EBUSY; 516 + } else { 517 + unsigned long timeout_jiffies = timespec_to_jiffies(timeout); 518 + unsigned long start_jiffies = jiffies; 519 + unsigned long remaining_jiffies; 520 + 521 + if (time_after(start_jiffies, timeout_jiffies)) 522 + remaining_jiffies = 0; 523 + else 524 + remaining_jiffies = timeout_jiffies - start_jiffies; 525 + 526 + ret = wait_event_interruptible_timeout(priv->fence_event, 527 + fence_completed(dev, fence), 528 + remaining_jiffies); 529 + 530 + if (ret == 0) { 531 + DBG("timeout waiting for fence: %u (completed: %u)", 532 + fence, priv->completed_fence); 533 + ret = -ETIMEDOUT; 534 + } else if (ret != -ERESTARTSYS) { 535 + ret = 0; 536 + } 521 537 } 522 538 523 539 return ret;
+7 -1
drivers/gpu/drm/msm/msm_drv.h
··· 153 153 int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 154 154 struct work_struct *work); 155 155 void msm_gem_move_to_active(struct drm_gem_object *obj, 156 - struct msm_gpu *gpu, uint32_t fence); 156 + struct msm_gpu *gpu, bool write, uint32_t fence); 157 157 void msm_gem_move_to_inactive(struct drm_gem_object *obj); 158 158 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 159 159 struct timespec *timeout); ··· 190 190 191 191 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 192 192 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 193 + 194 + static inline bool fence_completed(struct drm_device *dev, uint32_t fence) 195 + { 196 + struct msm_drm_private *priv = dev->dev_private; 197 + return priv->completed_fence >= fence; 198 + } 193 199 194 200 static inline int align_pitch(int width, int bpp) 195 201 {
+24 -10
drivers/gpu/drm/msm/msm_gem.c
··· 40 40 } 41 41 42 42 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); 43 - if (!msm_obj->sgt) { 43 + if (IS_ERR(msm_obj->sgt)) { 44 44 dev_err(dev->dev, "failed to allocate sgt\n"); 45 - return ERR_PTR(-ENOMEM); 45 + return ERR_CAST(msm_obj->sgt); 46 46 } 47 47 48 48 msm_obj->pages = p; ··· 159 159 out: 160 160 switch (ret) { 161 161 case -EAGAIN: 162 - set_need_resched(); 163 162 case 0: 164 163 case -ERESTARTSYS: 165 164 case -EINTR: ··· 392 393 } 393 394 394 395 void msm_gem_move_to_active(struct drm_gem_object *obj, 395 - struct msm_gpu *gpu, uint32_t fence) 396 + struct msm_gpu *gpu, bool write, uint32_t fence) 396 397 { 397 398 struct msm_gem_object *msm_obj = to_msm_bo(obj); 398 399 msm_obj->gpu = gpu; 399 - msm_obj->fence = fence; 400 + if (write) 401 + msm_obj->write_fence = fence; 402 + else 403 + msm_obj->read_fence = fence; 400 404 list_del_init(&msm_obj->mm_list); 401 405 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 402 406 } ··· 413 411 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 414 412 415 413 msm_obj->gpu = NULL; 416 - msm_obj->fence = 0; 414 + msm_obj->read_fence = 0; 415 + msm_obj->write_fence = 0; 417 416 list_del_init(&msm_obj->mm_list); 418 417 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 419 418 ··· 436 433 struct msm_gem_object *msm_obj = to_msm_bo(obj); 437 434 int ret = 0; 438 435 439 - if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC)) 440 - ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout); 436 + if (is_active(msm_obj)) { 437 + uint32_t fence = 0; 438 + 439 + if (op & MSM_PREP_READ) 440 + fence = msm_obj->write_fence; 441 + if (op & MSM_PREP_WRITE) 442 + fence = max(fence, msm_obj->read_fence); 443 + if (op & MSM_PREP_NOSYNC) 444 + timeout = NULL; 445 + 446 + ret = msm_wait_fence_interruptable(dev, fence, timeout); 447 + } 441 448 442 449 /* TODO cache maintenance */ 443 450 ··· 468 455 uint64_t off = drm_vma_node_start(&obj->vma_node); 469 456 470 457 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 
471 - seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n", 458 + seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n", 472 459 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 473 - msm_obj->fence, obj->name, obj->refcount.refcount.counter, 460 + msm_obj->read_fence, msm_obj->write_fence, 461 + obj->name, obj->refcount.refcount.counter, 474 462 off, msm_obj->vaddr, obj->size); 475 463 } 476 464
+1 -1
drivers/gpu/drm/msm/msm_gem.h
··· 36 36 */ 37 37 struct list_head mm_list; 38 38 struct msm_gpu *gpu; /* non-null if active */ 39 - uint32_t fence; 39 + uint32_t read_fence, write_fence; 40 40 41 41 /* Transiently in the process of submit ioctl, objects associated 42 42 * with the submit are on submit->bo_list.. this only lasts for
+13 -11
drivers/gpu/drm/msm/msm_gem_submit.c
··· 78 78 } 79 79 80 80 if (submit_bo.flags & BO_INVALID_FLAGS) { 81 - DBG("invalid flags: %x", submit_bo.flags); 81 + DRM_ERROR("invalid flags: %x\n", submit_bo.flags); 82 82 ret = -EINVAL; 83 83 goto out_unlock; 84 84 } ··· 92 92 */ 93 93 obj = idr_find(&file->object_idr, submit_bo.handle); 94 94 if (!obj) { 95 - DBG("invalid handle %u at index %u", submit_bo.handle, i); 95 + DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i); 96 96 ret = -EINVAL; 97 97 goto out_unlock; 98 98 } ··· 100 100 msm_obj = to_msm_bo(obj); 101 101 102 102 if (!list_empty(&msm_obj->submit_entry)) { 103 - DBG("handle %u at index %u already on submit list", 103 + DRM_ERROR("handle %u at index %u already on submit list\n", 104 104 submit_bo.handle, i); 105 105 ret = -EINVAL; 106 106 goto out_unlock; ··· 216 216 struct msm_gem_object **obj, uint32_t *iova, bool *valid) 217 217 { 218 218 if (idx >= submit->nr_bos) { 219 - DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos); 220 - return EINVAL; 219 + DRM_ERROR("invalid buffer index: %u (out of %u)\n", 220 + idx, submit->nr_bos); 221 + return -EINVAL; 221 222 } 222 223 223 224 if (obj) ··· 240 239 int ret; 241 240 242 241 if (offset % 4) { 243 - DBG("non-aligned cmdstream buffer: %u", offset); 242 + DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); 244 243 return -EINVAL; 245 244 } 246 245 ··· 267 266 return -EFAULT; 268 267 269 268 if (submit_reloc.submit_offset % 4) { 270 - DBG("non-aligned reloc offset: %u", 269 + DRM_ERROR("non-aligned reloc offset: %u\n", 271 270 submit_reloc.submit_offset); 272 271 return -EINVAL; 273 272 } ··· 277 276 278 277 if ((off >= (obj->base.size / 4)) || 279 278 (off < last_offset)) { 280 - DBG("invalid offset %u at reloc %u", off, i); 279 + DRM_ERROR("invalid offset %u at reloc %u\n", off, i); 281 280 return -EINVAL; 282 281 } 283 282 ··· 375 374 goto out; 376 375 377 376 if (submit_cmd.size % 4) { 378 - DBG("non-aligned cmdstream buffer size: %u", 377 + DRM_ERROR("non-aligned 
cmdstream buffer size: %u\n", 379 378 submit_cmd.size); 380 379 ret = -EINVAL; 381 380 goto out; 382 381 } 383 382 384 - if (submit_cmd.size >= msm_obj->base.size) { 385 - DBG("invalid cmdstream size: %u", submit_cmd.size); 383 + if ((submit_cmd.size + submit_cmd.submit_offset) >= 384 + msm_obj->base.size) { 385 + DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); 386 386 ret = -EINVAL; 387 387 goto out; 388 388 }
+20 -4
drivers/gpu/drm/msm/msm_gpu.c
··· 29 29 static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) 30 30 { 31 31 struct drm_device *dev = gpu->dev; 32 - struct kgsl_device_platform_data *pdata = pdev->dev.platform_data; 32 + struct kgsl_device_platform_data *pdata; 33 33 34 34 if (!pdev) { 35 35 dev_err(dev->dev, "could not find dtv pdata\n"); 36 36 return; 37 37 } 38 38 39 + pdata = pdev->dev.platform_data; 39 40 if (pdata->bus_scale_table) { 40 41 gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table); 41 42 DBG("bus scale client: %08x", gpu->bsc); ··· 231 230 static void hangcheck_handler(unsigned long data) 232 231 { 233 232 struct msm_gpu *gpu = (struct msm_gpu *)data; 233 + struct drm_device *dev = gpu->dev; 234 + struct msm_drm_private *priv = dev->dev_private; 234 235 uint32_t fence = gpu->funcs->last_fence(gpu); 235 236 236 237 if (fence != gpu->hangcheck_fence) { ··· 240 237 gpu->hangcheck_fence = fence; 241 238 } else if (fence < gpu->submitted_fence) { 242 239 /* no progress and not done.. hung! 
*/ 243 - struct msm_drm_private *priv = gpu->dev->dev_private; 244 240 gpu->hangcheck_fence = fence; 241 + dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n", 242 + gpu->name); 243 + dev_err(dev->dev, "%s: completed fence: %u\n", 244 + gpu->name, fence); 245 + dev_err(dev->dev, "%s: submitted fence: %u\n", 246 + gpu->name, gpu->submitted_fence); 245 247 queue_work(priv->wq, &gpu->recover_work); 246 248 } 247 249 248 250 /* if still more pending work, reset the hangcheck timer: */ 249 251 if (gpu->submitted_fence > gpu->hangcheck_fence) 250 252 hangcheck_timer_reset(gpu); 253 + 254 + /* workaround for missing irq: */ 255 + queue_work(priv->wq, &gpu->retire_work); 251 256 } 252 257 253 258 /* ··· 276 265 obj = list_first_entry(&gpu->active_list, 277 266 struct msm_gem_object, mm_list); 278 267 279 - if (obj->fence <= fence) { 268 + if ((obj->read_fence <= fence) && 269 + (obj->write_fence <= fence)) { 280 270 /* move to inactive: */ 281 271 msm_gem_move_to_inactive(&obj->base); 282 272 msm_gem_put_iova(&obj->base, gpu->id); ··· 333 321 submit->gpu->id, &iova); 334 322 } 335 323 336 - msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence); 324 + if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) 325 + msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); 326 + 327 + if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) 328 + msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); 337 329 } 338 330 hangcheck_timer_reset(gpu); 339 331 mutex_unlock(&dev->struct_mutex);
+18 -3
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
··· 579 579 init_reserved(struct nvbios_init *init) 580 580 { 581 581 u8 opcode = nv_ro08(init->bios, init->offset); 582 - trace("RESERVED\t0x%02x\n", opcode); 583 - init->offset += 1; 582 + u8 length, i; 583 + 584 + switch (opcode) { 585 + case 0xaa: 586 + length = 4; 587 + break; 588 + default: 589 + length = 1; 590 + break; 591 + } 592 + 593 + trace("RESERVED 0x%02x\t", opcode); 594 + for (i = 1; i < length; i++) 595 + cont(" 0x%02x", nv_ro08(init->bios, init->offset + i)); 596 + cont("\n"); 597 + init->offset += length; 584 598 } 585 599 586 600 /** ··· 1451 1437 data = init_rdvgai(init, 0x03c4, 0x01); 1452 1438 init_wrvgai(init, 0x03c4, 0x01, data | 0x20); 1453 1439 1454 - while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) { 1440 + for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) { 1455 1441 switch (addr) { 1456 1442 case 0x10021c: /* CKE_NORMAL */ 1457 1443 case 0x1002d0: /* CMD_REFRESH */ ··· 2149 2135 [0x99] = { init_zm_auxch }, 2150 2136 [0x9a] = { init_i2c_long_if }, 2151 2137 [0xa9] = { init_gpio_ne }, 2138 + [0xaa] = { init_reserved }, 2152 2139 }; 2153 2140 2154 2141 #define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
+17 -22
drivers/gpu/drm/nouveau/nouveau_display.c
··· 278 278 { 279 279 struct nouveau_drm *drm = nouveau_drm(dev); 280 280 struct nouveau_display *disp; 281 - u32 pclass = dev->pdev->class >> 8; 282 281 int ret, gen; 283 282 284 283 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL); ··· 339 340 drm_kms_helper_poll_init(dev); 340 341 drm_kms_helper_poll_disable(dev); 341 342 342 - if (nouveau_modeset == 1 || 343 - (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) { 344 - if (drm->vbios.dcb.entries) { 345 - if (nv_device(drm->device)->card_type < NV_50) 346 - ret = nv04_display_create(dev); 347 - else 348 - ret = nv50_display_create(dev); 349 - } else { 350 - ret = 0; 351 - } 352 - 353 - if (ret) 354 - goto disp_create_err; 355 - 356 - if (dev->mode_config.num_crtc) { 357 - ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 358 - if (ret) 359 - goto vblank_err; 360 - } 361 - 362 - nouveau_backlight_init(dev); 343 + if (drm->vbios.dcb.entries) { 344 + if (nv_device(drm->device)->card_type < NV_50) 345 + ret = nv04_display_create(dev); 346 + else 347 + ret = nv50_display_create(dev); 348 + } else { 349 + ret = 0; 363 350 } 364 351 352 + if (ret) 353 + goto disp_create_err; 354 + 355 + if (dev->mode_config.num_crtc) { 356 + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 357 + if (ret) 358 + goto vblank_err; 359 + } 360 + 361 + nouveau_backlight_init(dev); 365 362 return 0; 366 363 367 364 vblank_err:
+2 -1
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 454 454 int preferred_bpp; 455 455 int ret; 456 456 457 - if (!dev->mode_config.num_crtc) 457 + if (!dev->mode_config.num_crtc || 458 + (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) 458 459 return 0; 459 460 460 461 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+1 -3
drivers/gpu/drm/nouveau/nouveau_sgdma.c
··· 104 104 else 105 105 nvbe->ttm.ttm.func = &nv50_sgdma_backend; 106 106 107 - if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { 108 - kfree(nvbe); 107 + if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) 109 108 return NULL; 110 - } 111 109 return &nvbe->ttm.ttm; 112 110 }
+15 -8
drivers/gpu/drm/radeon/atombios_encoders.c
··· 707 707 switch (connector->connector_type) { 708 708 case DRM_MODE_CONNECTOR_DVII: 709 709 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 710 - if (drm_detect_hdmi_monitor(radeon_connector->edid) && 711 - radeon_audio) 710 + if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 711 + (drm_detect_hdmi_monitor(radeon_connector->edid) && 712 + (radeon_connector->audio == RADEON_AUDIO_AUTO))) 712 713 return ATOM_ENCODER_MODE_HDMI; 713 714 else if (radeon_connector->use_digital) 714 715 return ATOM_ENCODER_MODE_DVI; ··· 719 718 case DRM_MODE_CONNECTOR_DVID: 720 719 case DRM_MODE_CONNECTOR_HDMIA: 721 720 default: 722 - if (drm_detect_hdmi_monitor(radeon_connector->edid) && 723 - radeon_audio) 721 + if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 722 + (drm_detect_hdmi_monitor(radeon_connector->edid) && 723 + (radeon_connector->audio == RADEON_AUDIO_AUTO))) 724 724 return ATOM_ENCODER_MODE_HDMI; 725 725 else 726 726 return ATOM_ENCODER_MODE_DVI; ··· 734 732 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 735 733 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 736 734 return ATOM_ENCODER_MODE_DP; 737 - else if (drm_detect_hdmi_monitor(radeon_connector->edid) && 738 - radeon_audio) 735 + else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) || 736 + (drm_detect_hdmi_monitor(radeon_connector->edid) && 737 + (radeon_connector->audio == RADEON_AUDIO_AUTO))) 739 738 return ATOM_ENCODER_MODE_HDMI; 740 739 else 741 740 return ATOM_ENCODER_MODE_DVI; ··· 1650 1647 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); 1651 1648 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1652 1649 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1653 - /* some early dce3.2 boards have a bug in their transmitter control table */ 1654 - if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) 1650 + /* some dce3.x boards have a bug in their 
transmitter control table. 1651 + * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE 1652 + * does the same thing and more. 1653 + */ 1654 + if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) && 1655 + (rdev->family != CHIP_RS880)) 1655 1656 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1656 1657 } 1657 1658 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
-6
drivers/gpu/drm/radeon/btc_dpm.c
··· 2340 2340 return ret; 2341 2341 } 2342 2342 2343 - ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 2344 - if (ret) { 2345 - DRM_ERROR("rv770_dpm_force_performance_level failed\n"); 2346 - return ret; 2347 - } 2348 - 2349 2343 return 0; 2350 2344 } 2351 2345
-6
drivers/gpu/drm/radeon/ci_dpm.c
··· 4748 4748 if (pi->pcie_performance_request) 4749 4749 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 4750 4750 4751 - ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 4752 - if (ret) { 4753 - DRM_ERROR("ci_dpm_force_performance_level failed\n"); 4754 - return ret; 4755 - } 4756 - 4757 4751 cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 4758 4752 RADEON_CG_BLOCK_MC | 4759 4753 RADEON_CG_BLOCK_SDMA |
+26 -13
drivers/gpu/drm/radeon/ci_smc.c
··· 47 47 u32 smc_start_address, 48 48 const u8 *src, u32 byte_count, u32 limit) 49 49 { 50 + unsigned long flags; 50 51 u32 data, original_data; 51 52 u32 addr; 52 53 u32 extra_shift; 53 - int ret; 54 + int ret = 0; 54 55 55 56 if (smc_start_address & 3) 56 57 return -EINVAL; ··· 60 59 61 60 addr = smc_start_address; 62 61 62 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 63 63 while (byte_count >= 4) { 64 64 /* SMC address space is BE */ 65 65 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 66 66 67 67 ret = ci_set_smc_sram_address(rdev, addr, limit); 68 68 if (ret) 69 - return ret; 69 + goto done; 70 70 71 71 WREG32(SMC_IND_DATA_0, data); 72 72 ··· 82 80 83 81 ret = ci_set_smc_sram_address(rdev, addr, limit); 84 82 if (ret) 85 - return ret; 83 + goto done; 86 84 87 85 original_data = RREG32(SMC_IND_DATA_0); 88 86 ··· 99 97 100 98 ret = ci_set_smc_sram_address(rdev, addr, limit); 101 99 if (ret) 102 - return ret; 100 + goto done; 103 101 104 102 WREG32(SMC_IND_DATA_0, data); 105 103 } 106 - return 0; 104 + 105 + done: 106 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 107 + 108 + return ret; 107 109 } 108 110 109 111 void ci_start_smc(struct radeon_device *rdev) ··· 203 197 204 198 int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) 205 199 { 200 + unsigned long flags; 206 201 u32 ucode_start_address; 207 202 u32 ucode_size; 208 203 const u8 *src; ··· 226 219 return -EINVAL; 227 220 228 221 src = (const u8 *)rdev->smc_fw->data; 222 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 229 223 WREG32(SMC_IND_INDEX_0, ucode_start_address); 230 224 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 231 225 while (ucode_size >= 4) { ··· 239 231 ucode_size -= 4; 240 232 } 241 233 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 234 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 242 235 243 236 return 0; 244 237 } ··· 247 238 int ci_read_smc_sram_dword(struct radeon_device *rdev, 248 239 u32 
smc_address, u32 *value, u32 limit) 249 240 { 241 + unsigned long flags; 250 242 int ret; 251 243 244 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 252 245 ret = ci_set_smc_sram_address(rdev, smc_address, limit); 253 - if (ret) 254 - return ret; 246 + if (ret == 0) 247 + *value = RREG32(SMC_IND_DATA_0); 248 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 255 249 256 - *value = RREG32(SMC_IND_DATA_0); 257 - return 0; 250 + return ret; 258 251 } 259 252 260 253 int ci_write_smc_sram_dword(struct radeon_device *rdev, 261 254 u32 smc_address, u32 value, u32 limit) 262 255 { 256 + unsigned long flags; 263 257 int ret; 264 258 259 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 265 260 ret = ci_set_smc_sram_address(rdev, smc_address, limit); 266 - if (ret) 267 - return ret; 261 + if (ret == 0) 262 + WREG32(SMC_IND_DATA_0, value); 263 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 268 264 269 - WREG32(SMC_IND_DATA_0, value); 270 - return 0; 265 + return ret; 271 266 }
+29 -7
drivers/gpu/drm/radeon/cik.c
··· 77 77 static void cik_program_aspm(struct radeon_device *rdev); 78 78 static void cik_init_pg(struct radeon_device *rdev); 79 79 static void cik_init_cg(struct radeon_device *rdev); 80 + static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, 81 + bool enable); 80 82 81 83 /* get temperature in millidegrees */ 82 84 int ci_get_temp(struct radeon_device *rdev) ··· 122 120 */ 123 121 u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg) 124 122 { 123 + unsigned long flags; 125 124 u32 r; 126 125 126 + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); 127 127 WREG32(PCIE_INDEX, reg); 128 128 (void)RREG32(PCIE_INDEX); 129 129 r = RREG32(PCIE_DATA); 130 + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); 130 131 return r; 131 132 } 132 133 133 134 void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 134 135 { 136 + unsigned long flags; 137 + 138 + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); 135 139 WREG32(PCIE_INDEX, reg); 136 140 (void)RREG32(PCIE_INDEX); 137 141 WREG32(PCIE_DATA, v); 138 142 (void)RREG32(PCIE_DATA); 143 + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); 139 144 } 140 145 141 146 static const u32 spectre_rlc_save_restore_register_list[] = ··· 2731 2722 } else if ((rdev->pdev->device == 0x1309) || 2732 2723 (rdev->pdev->device == 0x130A) || 2733 2724 (rdev->pdev->device == 0x130D) || 2734 - (rdev->pdev->device == 0x1313)) { 2725 + (rdev->pdev->device == 0x1313) || 2726 + (rdev->pdev->device == 0x131D)) { 2735 2727 rdev->config.cik.max_cu_per_sh = 6; 2736 2728 rdev->config.cik.max_backends_per_se = 2; 2737 2729 } else if ((rdev->pdev->device == 0x1306) || ··· 4023 4013 { 4024 4014 int r; 4025 4015 4016 + cik_enable_gui_idle_interrupt(rdev, false); 4017 + 4026 4018 r = cik_cp_load_microcode(rdev); 4027 4019 if (r) 4028 4020 return r; ··· 4035 4023 r = cik_cp_compute_resume(rdev); 4036 4024 if (r) 4037 4025 return r; 4026 + 4027 + cik_enable_gui_idle_interrupt(rdev, true); 4038 4028 4039 4029 return 0; 4040 4030 
} ··· 5390 5376 void cik_update_cg(struct radeon_device *rdev, 5391 5377 u32 block, bool enable) 5392 5378 { 5379 + 5393 5380 if (block & RADEON_CG_BLOCK_GFX) { 5381 + cik_enable_gui_idle_interrupt(rdev, false); 5394 5382 /* order matters! */ 5395 5383 if (enable) { 5396 5384 cik_enable_mgcg(rdev, true); ··· 5401 5385 cik_enable_cgcg(rdev, false); 5402 5386 cik_enable_mgcg(rdev, false); 5403 5387 } 5388 + cik_enable_gui_idle_interrupt(rdev, true); 5404 5389 } 5405 5390 5406 5391 if (block & RADEON_CG_BLOCK_MC) { ··· 5558 5541 { 5559 5542 u32 data, orig; 5560 5543 5561 - if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 5544 + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { 5562 5545 orig = data = RREG32(RLC_PG_CNTL); 5563 5546 data |= GFX_PG_ENABLE; 5564 5547 if (orig != data) ··· 5822 5805 if (rdev->pg_flags) { 5823 5806 cik_enable_sck_slowdown_on_pu(rdev, true); 5824 5807 cik_enable_sck_slowdown_on_pd(rdev, true); 5825 - if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5808 + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { 5826 5809 cik_init_gfx_cgpg(rdev); 5827 5810 cik_enable_cp_pg(rdev, true); 5828 5811 cik_enable_gds_pg(rdev, true); ··· 5836 5819 { 5837 5820 if (rdev->pg_flags) { 5838 5821 cik_update_gfx_pg(rdev, false); 5839 - if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5822 + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { 5840 5823 cik_enable_cp_pg(rdev, false); 5841 5824 cik_enable_gds_pg(rdev, false); 5842 5825 } ··· 5912 5895 u32 tmp; 5913 5896 5914 5897 /* gfx ring */ 5915 - WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5898 + tmp = RREG32(CP_INT_CNTL_RING0) & 5899 + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5900 + WREG32(CP_INT_CNTL_RING0, tmp); 5916 5901 /* sdma */ 5917 5902 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 5918 5903 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp); ··· 6055 6036 */ 6056 6037 int cik_irq_set(struct radeon_device *rdev) 6057 6038 { 6058 - 
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE | 6059 - PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; 6039 + u32 cp_int_cntl; 6060 6040 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; 6061 6041 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; 6062 6042 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; ··· 6075 6057 cik_disable_interrupt_state(rdev); 6076 6058 return 0; 6077 6059 } 6060 + 6061 + cp_int_cntl = RREG32(CP_INT_CNTL_RING0) & 6062 + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 6063 + cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; 6078 6064 6079 6065 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 6080 6066 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
-6
drivers/gpu/drm/radeon/cypress_dpm.c
··· 2014 2014 if (eg_pi->pcie_performance_request) 2015 2015 cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 2016 2016 2017 - ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 2018 - if (ret) { 2019 - DRM_ERROR("rv770_dpm_force_performance_level failed\n"); 2020 - return ret; 2021 - } 2022 - 2023 2017 return 0; 2024 2018 } 2025 2019
+10 -2
drivers/gpu/drm/radeon/dce6_afmt.c
··· 28 28 static u32 dce6_endpoint_rreg(struct radeon_device *rdev, 29 29 u32 block_offset, u32 reg) 30 30 { 31 + unsigned long flags; 31 32 u32 r; 32 33 34 + spin_lock_irqsave(&rdev->end_idx_lock, flags); 33 35 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 34 36 r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset); 37 + spin_unlock_irqrestore(&rdev->end_idx_lock, flags); 38 + 35 39 return r; 36 40 } 37 41 38 42 static void dce6_endpoint_wreg(struct radeon_device *rdev, 39 43 u32 block_offset, u32 reg, u32 v) 40 44 { 45 + unsigned long flags; 46 + 47 + spin_lock_irqsave(&rdev->end_idx_lock, flags); 41 48 if (ASIC_IS_DCE8(rdev)) 42 49 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 43 50 else 44 51 WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, 45 52 AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg)); 46 53 WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v); 54 + spin_unlock_irqrestore(&rdev->end_idx_lock, flags); 47 55 } 48 56 49 57 #define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) ··· 94 86 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 95 87 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 96 88 u32 offset = dig->afmt->offset; 97 - u32 id = dig->afmt->pin->id; 98 89 99 90 if (!dig->afmt->pin) 100 91 return; 101 92 102 - WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id)); 93 + WREG32(AFMT_AUDIO_SRC_CONTROL + offset, 94 + AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); 103 95 } 104 96 105 97 void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+129 -35
drivers/gpu/drm/radeon/kv_dpm.c
··· 40 40 static void kv_enable_new_levels(struct radeon_device *rdev); 41 41 static void kv_program_nbps_index_settings(struct radeon_device *rdev, 42 42 struct radeon_ps *new_rps); 43 + static int kv_set_enabled_level(struct radeon_device *rdev, u32 level); 43 44 static int kv_set_enabled_levels(struct radeon_device *rdev); 44 45 static int kv_force_dpm_highest(struct radeon_device *rdev); 45 46 static int kv_force_dpm_lowest(struct radeon_device *rdev); ··· 520 519 521 520 static void kv_program_vc(struct radeon_device *rdev) 522 521 { 523 - WREG32_SMC(CG_FTV_0, 0x3FFFC000); 522 + WREG32_SMC(CG_FTV_0, 0x3FFFC100); 524 523 } 525 524 526 525 static void kv_clear_vc(struct radeon_device *rdev) ··· 639 638 640 639 static int kv_unforce_levels(struct radeon_device *rdev) 641 640 { 642 - return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); 641 + if (rdev->family == CHIP_KABINI) 642 + return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); 643 + else 644 + return kv_set_enabled_levels(rdev); 643 645 } 644 646 645 647 static int kv_update_sclk_t(struct radeon_device *rdev) ··· 671 667 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 672 668 673 669 if (table && table->count) { 674 - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 675 - if ((table->entries[i].clk == pi->boot_pl.sclk) || 676 - (i == 0)) 670 + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 671 + if (table->entries[i].clk == pi->boot_pl.sclk) 677 672 break; 678 673 } 679 674 ··· 685 682 if (table->num_max_dpm_entries == 0) 686 683 return -EINVAL; 687 684 688 - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 689 - if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) || 690 - (i == 0)) 685 + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 686 + if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) 691 687 break; 692 688 } 693 689 ··· 1080 1078 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); 1081 1079 } 1082 1080 1081 + static void 
kv_reset_acp_boot_level(struct radeon_device *rdev) 1082 + { 1083 + struct kv_power_info *pi = kv_get_pi(rdev); 1084 + 1085 + pi->acp_boot_level = 0xff; 1086 + } 1087 + 1083 1088 static void kv_update_current_ps(struct radeon_device *rdev, 1084 1089 struct radeon_ps *rps) 1085 1090 { ··· 1107 1098 pi->requested_rps = *rps; 1108 1099 pi->requested_ps = *new_ps; 1109 1100 pi->requested_rps.ps_priv = &pi->requested_ps; 1101 + } 1102 + 1103 + void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable) 1104 + { 1105 + struct kv_power_info *pi = kv_get_pi(rdev); 1106 + int ret; 1107 + 1108 + if (pi->bapm_enable) { 1109 + ret = kv_smc_bapm_enable(rdev, enable); 1110 + if (ret) 1111 + DRM_ERROR("kv_smc_bapm_enable failed\n"); 1112 + } 1110 1113 } 1111 1114 1112 1115 int kv_dpm_enable(struct radeon_device *rdev) ··· 1213 1192 return ret; 1214 1193 } 1215 1194 1195 + kv_reset_acp_boot_level(rdev); 1196 + 1216 1197 if (rdev->irq.installed && 1217 1198 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 1218 1199 ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); ··· 1224 1201 } 1225 1202 rdev->irq.dpm_thermal = true; 1226 1203 radeon_irq_set(rdev); 1204 + } 1205 + 1206 + ret = kv_smc_bapm_enable(rdev, false); 1207 + if (ret) { 1208 + DRM_ERROR("kv_smc_bapm_enable failed\n"); 1209 + return ret; 1227 1210 } 1228 1211 1229 1212 /* powerdown unused blocks for now */ ··· 1254 1225 RADEON_CG_BLOCK_SDMA | 1255 1226 RADEON_CG_BLOCK_BIF | 1256 1227 RADEON_CG_BLOCK_HDP), false); 1228 + 1229 + kv_smc_bapm_enable(rdev, false); 1257 1230 1258 1231 /* powerup blocks */ 1259 1232 kv_dpm_powergate_acp(rdev, false); ··· 1481 1450 return kv_enable_samu_dpm(rdev, !gate); 1482 1451 } 1483 1452 1453 + static u8 kv_get_acp_boot_level(struct radeon_device *rdev) 1454 + { 1455 + u8 i; 1456 + struct radeon_clock_voltage_dependency_table *table = 1457 + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; 1458 + 1459 + for (i = 0; i < 
table->count; i++) { 1460 + if (table->entries[i].clk >= 0) /* XXX */ 1461 + break; 1462 + } 1463 + 1464 + if (i >= table->count) 1465 + i = table->count - 1; 1466 + 1467 + return i; 1468 + } 1469 + 1470 + static void kv_update_acp_boot_level(struct radeon_device *rdev) 1471 + { 1472 + struct kv_power_info *pi = kv_get_pi(rdev); 1473 + u8 acp_boot_level; 1474 + 1475 + if (!pi->caps_stable_p_state) { 1476 + acp_boot_level = kv_get_acp_boot_level(rdev); 1477 + if (acp_boot_level != pi->acp_boot_level) { 1478 + pi->acp_boot_level = acp_boot_level; 1479 + kv_send_msg_to_smc_with_parameter(rdev, 1480 + PPSMC_MSG_ACPDPM_SetEnabledMask, 1481 + (1 << pi->acp_boot_level)); 1482 + } 1483 + } 1484 + } 1485 + 1484 1486 static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate) 1485 1487 { 1486 1488 struct kv_power_info *pi = kv_get_pi(rdev); ··· 1525 1461 if (pi->caps_stable_p_state) 1526 1462 pi->acp_boot_level = table->count - 1; 1527 1463 else 1528 - pi->acp_boot_level = 0; 1464 + pi->acp_boot_level = kv_get_acp_boot_level(rdev); 1529 1465 1530 1466 ret = kv_copy_bytes_to_smc(rdev, 1531 1467 pi->dpm_table_start + ··· 1652 1588 } 1653 1589 } 1654 1590 1655 - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 1656 - if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) || 1657 - (i == 0)) { 1658 - pi->highest_valid = i; 1591 + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1592 + if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) 1659 1593 break; 1660 - } 1661 1594 } 1595 + pi->highest_valid = i; 1662 1596 1663 1597 if (pi->lowest_valid > pi->highest_valid) { 1664 1598 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > ··· 1677 1615 } 1678 1616 } 1679 1617 1680 - for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) { 1618 + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { 1681 1619 if (table->entries[i].sclk_frequency <= 1682 - new_ps->levels[new_ps->num_levels - 1].sclk || 
1683 - i == 0) { 1684 - pi->highest_valid = i; 1620 + new_ps->levels[new_ps->num_levels - 1].sclk) 1685 1621 break; 1686 - } 1687 1622 } 1623 + pi->highest_valid = i; 1688 1624 1689 1625 if (pi->lowest_valid > pi->highest_valid) { 1690 1626 if ((new_ps->levels[0].sclk - ··· 1784 1724 RADEON_CG_BLOCK_BIF | 1785 1725 RADEON_CG_BLOCK_HDP), false); 1786 1726 1727 + if (pi->bapm_enable) { 1728 + ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power); 1729 + if (ret) { 1730 + DRM_ERROR("kv_smc_bapm_enable failed\n"); 1731 + return ret; 1732 + } 1733 + } 1734 + 1787 1735 if (rdev->family == CHIP_KABINI) { 1788 1736 if (pi->enable_dpm) { 1789 1737 kv_set_valid_clock_range(rdev, new_ps); ··· 1843 1775 return ret; 1844 1776 } 1845 1777 #endif 1778 + kv_update_acp_boot_level(rdev); 1846 1779 kv_update_sclk_t(rdev); 1847 1780 kv_enable_nb_dpm(rdev); 1848 1781 } ··· 1854 1785 RADEON_CG_BLOCK_BIF | 1855 1786 RADEON_CG_BLOCK_HDP), true); 1856 1787 1857 - rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1858 1788 return 0; 1859 1789 } 1860 1790 ··· 1874 1806 1875 1807 void kv_dpm_reset_asic(struct radeon_device *rdev) 1876 1808 { 1877 - kv_force_lowest_valid(rdev); 1878 - kv_init_graphics_levels(rdev); 1879 - kv_program_bootup_state(rdev); 1880 - kv_upload_dpm_settings(rdev); 1881 - kv_force_lowest_valid(rdev); 1882 - kv_unforce_levels(rdev); 1809 + struct kv_power_info *pi = kv_get_pi(rdev); 1810 + 1811 + if (rdev->family == CHIP_KABINI) { 1812 + kv_force_lowest_valid(rdev); 1813 + kv_init_graphics_levels(rdev); 1814 + kv_program_bootup_state(rdev); 1815 + kv_upload_dpm_settings(rdev); 1816 + kv_force_lowest_valid(rdev); 1817 + kv_unforce_levels(rdev); 1818 + } else { 1819 + kv_init_graphics_levels(rdev); 1820 + kv_program_bootup_state(rdev); 1821 + kv_freeze_sclk_dpm(rdev, true); 1822 + kv_upload_dpm_settings(rdev); 1823 + kv_freeze_sclk_dpm(rdev, false); 1824 + kv_set_enabled_level(rdev, pi->graphics_boot_level); 1825 + } 1883 1826 } 1884 1827 1885 1828 //XXX use 
sumo_dpm_display_configuration_changed ··· 1950 1871 if (ret) 1951 1872 return ret; 1952 1873 1953 - for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i >= 0; i--) { 1874 + for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { 1954 1875 if (enable_mask & (1 << i)) 1955 1876 break; 1956 1877 } 1957 1878 1958 - return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 1879 + if (rdev->family == CHIP_KABINI) 1880 + return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 1881 + else 1882 + return kv_set_enabled_level(rdev, i); 1959 1883 } 1960 1884 1961 1885 static int kv_force_dpm_lowest(struct radeon_device *rdev) ··· 1975 1893 break; 1976 1894 } 1977 1895 1978 - return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 1896 + if (rdev->family == CHIP_KABINI) 1897 + return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 1898 + else 1899 + return kv_set_enabled_level(rdev, i); 1979 1900 } 1980 1901 1981 1902 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev, ··· 1996 1911 if (!pi->caps_sclk_ds) 1997 1912 return 0; 1998 1913 1999 - for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) { 1914 + for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { 2000 1915 temp = sclk / sumo_get_sleep_divider_from_id(i); 2001 - if ((temp >= min) || (i == 0)) 1916 + if (temp >= min) 2002 1917 break; 2003 1918 } 2004 1919 ··· 2124 2039 ps->dpmx_nb_ps_lo = 0x1; 2125 2040 ps->dpmx_nb_ps_hi = 0x0; 2126 2041 } else { 2127 - ps->dpm0_pg_nb_ps_lo = 0x1; 2042 + ps->dpm0_pg_nb_ps_lo = 0x3; 2128 2043 ps->dpm0_pg_nb_ps_hi = 0x0; 2129 - ps->dpmx_nb_ps_lo = 0x2; 2130 - ps->dpmx_nb_ps_hi = 0x1; 2044 + ps->dpmx_nb_ps_lo = 0x3; 2045 + ps->dpmx_nb_ps_hi = 0x0; 2131 2046 2132 - if (pi->sys_info.nb_dpm_enable && pi->battery_state) { 2047 + if (pi->sys_info.nb_dpm_enable) { 2133 2048 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2134 2049 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) || 2135 
2050 pi->disable_nb_ps3_in_battery; ··· 2293 2208 if (i >= pi->lowest_valid && i <= pi->highest_valid) 2294 2209 kv_dpm_power_level_enable(rdev, i, true); 2295 2210 } 2211 + } 2212 + 2213 + static int kv_set_enabled_level(struct radeon_device *rdev, u32 level) 2214 + { 2215 + u32 new_mask = (1 << level); 2216 + 2217 + return kv_send_msg_to_smc_with_parameter(rdev, 2218 + PPSMC_MSG_SCLKDPM_SetEnabledMask, 2219 + new_mask); 2296 2220 } 2297 2221 2298 2222 static int kv_set_enabled_levels(struct radeon_device *rdev)
+1
drivers/gpu/drm/radeon/kv_dpm.h
··· 192 192 int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 193 193 u32 *value, u32 limit); 194 194 int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable); 195 + int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable); 195 196 int kv_copy_bytes_to_smc(struct radeon_device *rdev, 196 197 u32 smc_start_address, 197 198 const u8 *src, u32 byte_count, u32 limit);
+8
drivers/gpu/drm/radeon/kv_smc.c
··· 107 107 return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable); 108 108 } 109 109 110 + int kv_smc_bapm_enable(struct radeon_device *rdev, bool enable) 111 + { 112 + if (enable) 113 + return kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM); 114 + else 115 + return kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM); 116 + } 117 + 110 118 int kv_copy_bytes_to_smc(struct radeon_device *rdev, 111 119 u32 smc_start_address, 112 120 const u8 *src, u32 byte_count, u32 limit)
-6
drivers/gpu/drm/radeon/ni_dpm.c
··· 3865 3865 return ret; 3866 3866 } 3867 3867 3868 - ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 3869 - if (ret) { 3870 - DRM_ERROR("ni_dpm_force_performance_level failed\n"); 3871 - return ret; 3872 - } 3873 - 3874 3868 return 0; 3875 3869 } 3876 3870
+2
drivers/gpu/drm/radeon/ppsmc.h
··· 163 163 #define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) 164 164 #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) 165 165 #define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) 166 + #define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) 167 + #define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121) 166 168 #define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) 167 169 168 170
+7
drivers/gpu/drm/radeon/r100.c
··· 2853 2853 2854 2854 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg) 2855 2855 { 2856 + unsigned long flags; 2856 2857 uint32_t data; 2857 2858 2859 + spin_lock_irqsave(&rdev->pll_idx_lock, flags); 2858 2860 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f); 2859 2861 r100_pll_errata_after_index(rdev); 2860 2862 data = RREG32(RADEON_CLOCK_CNTL_DATA); 2861 2863 r100_pll_errata_after_data(rdev); 2864 + spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); 2862 2865 return data; 2863 2866 } 2864 2867 2865 2868 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2866 2869 { 2870 + unsigned long flags; 2871 + 2872 + spin_lock_irqsave(&rdev->pll_idx_lock, flags); 2867 2873 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN)); 2868 2874 r100_pll_errata_after_index(rdev); 2869 2875 WREG32(RADEON_CLOCK_CNTL_DATA, v); 2870 2876 r100_pll_errata_after_data(rdev); 2877 + spin_unlock_irqrestore(&rdev->pll_idx_lock, flags); 2871 2878 } 2872 2879 2873 2880 static void r100_set_safe_registers(struct radeon_device *rdev)
+7
drivers/gpu/drm/radeon/r420.c
··· 160 160 161 161 u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) 162 162 { 163 + unsigned long flags; 163 164 u32 r; 164 165 166 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 165 167 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); 166 168 r = RREG32(R_0001FC_MC_IND_DATA); 169 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 167 170 return r; 168 171 } 169 172 170 173 void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 171 174 { 175 + unsigned long flags; 176 + 177 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 172 178 WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | 173 179 S_0001F8_MC_IND_WR_EN(1)); 174 180 WREG32(R_0001FC_MC_IND_DATA, v); 181 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 175 182 } 176 183 177 184 static void r420_debugfs(struct radeon_device *rdev)
+19
drivers/gpu/drm/radeon/r600.c
··· 119 119 return rdev->clock.spll.reference_freq; 120 120 } 121 121 122 + int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) 123 + { 124 + return 0; 125 + } 126 + 122 127 /* get temperature in millidegrees */ 123 128 int rv6xx_get_temp(struct radeon_device *rdev) 124 129 { ··· 1050 1045 1051 1046 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg) 1052 1047 { 1048 + unsigned long flags; 1053 1049 uint32_t r; 1054 1050 1051 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 1055 1052 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg)); 1056 1053 r = RREG32(R_0028FC_MC_DATA); 1057 1054 WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR); 1055 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 1058 1056 return r; 1059 1057 } 1060 1058 1061 1059 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1062 1060 { 1061 + unsigned long flags; 1062 + 1063 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 1063 1064 WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) | 1064 1065 S_0028F8_MC_IND_WR_EN(1)); 1065 1066 WREG32(R_0028FC_MC_DATA, v); 1066 1067 WREG32(R_0028F8_MC_INDEX, 0x7F); 1068 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 1067 1069 } 1068 1070 1069 1071 static void r600_mc_program(struct radeon_device *rdev) ··· 2104 2092 */ 2105 2093 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 2106 2094 { 2095 + unsigned long flags; 2107 2096 u32 r; 2108 2097 2098 + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); 2109 2099 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2110 2100 (void)RREG32(PCIE_PORT_INDEX); 2111 2101 r = RREG32(PCIE_PORT_DATA); 2102 + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); 2112 2103 return r; 2113 2104 } 2114 2105 2115 2106 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2116 2107 { 2108 + unsigned long flags; 2109 + 2110 + spin_lock_irqsave(&rdev->pciep_idx_lock, flags); 2117 2111 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2118 2112 (void)RREG32(PCIE_PORT_INDEX); 
2119 2113 WREG32(PCIE_PORT_DATA, (v)); 2120 2114 (void)RREG32(PCIE_PORT_DATA); 2115 + spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags); 2121 2116 } 2122 2117 2123 2118 /*
+14 -24
drivers/gpu/drm/radeon/r600_dpm.c
··· 1219 1219 1220 1220 void r600_free_extended_power_table(struct radeon_device *rdev) 1221 1221 { 1222 - if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries) 1223 - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); 1224 - if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) 1225 - kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); 1226 - if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) 1227 - kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); 1228 - if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) 1229 - kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries); 1230 - if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) 1231 - kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries); 1232 - if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) 1233 - kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries); 1234 - if (rdev->pm.dpm.dyn_state.ppm_table) 1235 - kfree(rdev->pm.dpm.dyn_state.ppm_table); 1236 - if (rdev->pm.dpm.dyn_state.cac_tdp_table) 1237 - kfree(rdev->pm.dpm.dyn_state.cac_tdp_table); 1238 - if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) 1239 - kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries); 1240 - if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) 1241 - kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries); 1242 - if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) 1243 - kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries); 1244 - if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) 1245 - kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries); 1222 + struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state; 1223 + 1224 + kfree(dyn_state->vddc_dependency_on_sclk.entries); 1225 + kfree(dyn_state->vddci_dependency_on_mclk.entries); 1226 + 
kfree(dyn_state->vddc_dependency_on_mclk.entries); 1227 + kfree(dyn_state->mvdd_dependency_on_mclk.entries); 1228 + kfree(dyn_state->cac_leakage_table.entries); 1229 + kfree(dyn_state->phase_shedding_limits_table.entries); 1230 + kfree(dyn_state->ppm_table); 1231 + kfree(dyn_state->cac_tdp_table); 1232 + kfree(dyn_state->vce_clock_voltage_dependency_table.entries); 1233 + kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); 1234 + kfree(dyn_state->samu_clock_voltage_dependency_table.entries); 1235 + kfree(dyn_state->acp_clock_voltage_dependency_table.entries); 1246 1236 } 1247 1237 1248 1238 enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
+1 -1
drivers/gpu/drm/radeon/r600d.h
··· 1040 1040 # define HDMI0_AVI_INFO_CONT (1 << 1) 1041 1041 # define HDMI0_AUDIO_INFO_SEND (1 << 4) 1042 1042 # define HDMI0_AUDIO_INFO_CONT (1 << 5) 1043 - # define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ 1043 + # define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */ 1044 1044 # define HDMI0_AUDIO_INFO_UPDATE (1 << 7) 1045 1045 # define HDMI0_MPEG_INFO_SEND (1 << 8) 1046 1046 # define HDMI0_MPEG_INFO_CONT (1 << 9)
+81 -1
drivers/gpu/drm/radeon/radeon.h
··· 181 181 #define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16) 182 182 183 183 /* PG flags */ 184 - #define RADEON_PG_SUPPORT_GFX_CG (1 << 0) 184 + #define RADEON_PG_SUPPORT_GFX_PG (1 << 0) 185 185 #define RADEON_PG_SUPPORT_GFX_SMG (1 << 1) 186 186 #define RADEON_PG_SUPPORT_GFX_DMG (1 << 2) 187 187 #define RADEON_PG_SUPPORT_UVD (1 << 3) ··· 1778 1778 int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level); 1779 1779 bool (*vblank_too_short)(struct radeon_device *rdev); 1780 1780 void (*powergate_uvd)(struct radeon_device *rdev, bool gate); 1781 + void (*enable_bapm)(struct radeon_device *rdev, bool enable); 1781 1782 } dpm; 1782 1783 /* pageflipping */ 1783 1784 struct { ··· 2111 2110 resource_size_t rmmio_size; 2112 2111 /* protects concurrent MM_INDEX/DATA based register access */ 2113 2112 spinlock_t mmio_idx_lock; 2113 + /* protects concurrent SMC based register access */ 2114 + spinlock_t smc_idx_lock; 2115 + /* protects concurrent PLL register access */ 2116 + spinlock_t pll_idx_lock; 2117 + /* protects concurrent MC register access */ 2118 + spinlock_t mc_idx_lock; 2119 + /* protects concurrent PCIE register access */ 2120 + spinlock_t pcie_idx_lock; 2121 + /* protects concurrent PCIE_PORT register access */ 2122 + spinlock_t pciep_idx_lock; 2123 + /* protects concurrent PIF register access */ 2124 + spinlock_t pif_idx_lock; 2125 + /* protects concurrent CG register access */ 2126 + spinlock_t cg_idx_lock; 2127 + /* protects concurrent UVD register access */ 2128 + spinlock_t uvd_idx_lock; 2129 + /* protects concurrent RCU register access */ 2130 + spinlock_t rcu_idx_lock; 2131 + /* protects concurrent DIDT register access */ 2132 + spinlock_t didt_idx_lock; 2133 + /* protects concurrent ENDPOINT (audio) register access */ 2134 + spinlock_t end_idx_lock; 2114 2135 void __iomem *rmmio; 2115 2136 radeon_rreg_t mc_rreg; 2116 2137 radeon_wreg_t mc_wreg; ··· 2300 2277 */ 2301 2278 static inline uint32_t rv370_pcie_rreg(struct 
radeon_device *rdev, uint32_t reg) 2302 2279 { 2280 + unsigned long flags; 2303 2281 uint32_t r; 2304 2282 2283 + spin_lock_irqsave(&rdev->pcie_idx_lock, flags); 2305 2284 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2306 2285 r = RREG32(RADEON_PCIE_DATA); 2286 + spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); 2307 2287 return r; 2308 2288 } 2309 2289 2310 2290 static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 2311 2291 { 2292 + unsigned long flags; 2293 + 2294 + spin_lock_irqsave(&rdev->pcie_idx_lock, flags); 2312 2295 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); 2313 2296 WREG32(RADEON_PCIE_DATA, (v)); 2297 + spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); 2314 2298 } 2315 2299 2316 2300 static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) 2317 2301 { 2302 + unsigned long flags; 2318 2303 u32 r; 2319 2304 2305 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 2320 2306 WREG32(TN_SMC_IND_INDEX_0, (reg)); 2321 2307 r = RREG32(TN_SMC_IND_DATA_0); 2308 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 2322 2309 return r; 2323 2310 } 2324 2311 2325 2312 static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2326 2313 { 2314 + unsigned long flags; 2315 + 2316 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 2327 2317 WREG32(TN_SMC_IND_INDEX_0, (reg)); 2328 2318 WREG32(TN_SMC_IND_DATA_0, (v)); 2319 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 2329 2320 } 2330 2321 2331 2322 static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) 2332 2323 { 2324 + unsigned long flags; 2333 2325 u32 r; 2334 2326 2327 + spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 2335 2328 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2336 2329 r = RREG32(R600_RCU_DATA); 2330 + spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 2337 2331 return r; 2338 2332 } 2339 2333 2340 2334 static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2341 2335 { 2336 + 
unsigned long flags; 2337 + 2338 + spin_lock_irqsave(&rdev->rcu_idx_lock, flags); 2342 2339 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 2343 2340 WREG32(R600_RCU_DATA, (v)); 2341 + spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); 2344 2342 } 2345 2343 2346 2344 static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) 2347 2345 { 2346 + unsigned long flags; 2348 2347 u32 r; 2349 2348 2349 + spin_lock_irqsave(&rdev->cg_idx_lock, flags); 2350 2350 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2351 2351 r = RREG32(EVERGREEN_CG_IND_DATA); 2352 + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); 2352 2353 return r; 2353 2354 } 2354 2355 2355 2356 static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2356 2357 { 2358 + unsigned long flags; 2359 + 2360 + spin_lock_irqsave(&rdev->cg_idx_lock, flags); 2357 2361 WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); 2358 2362 WREG32(EVERGREEN_CG_IND_DATA, (v)); 2363 + spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); 2359 2364 } 2360 2365 2361 2366 static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) 2362 2367 { 2368 + unsigned long flags; 2363 2369 u32 r; 2364 2370 2371 + spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2365 2372 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2366 2373 r = RREG32(EVERGREEN_PIF_PHY0_DATA); 2374 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2367 2375 return r; 2368 2376 } 2369 2377 2370 2378 static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2371 2379 { 2380 + unsigned long flags; 2381 + 2382 + spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2372 2383 WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); 2373 2384 WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); 2385 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2374 2386 } 2375 2387 2376 2388 static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) 2377 2389 { 2390 + unsigned long flags; 2378 2391 u32 r; 2379 2392 2393 + 
spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2380 2394 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2381 2395 r = RREG32(EVERGREEN_PIF_PHY1_DATA); 2396 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2382 2397 return r; 2383 2398 } 2384 2399 2385 2400 static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2386 2401 { 2402 + unsigned long flags; 2403 + 2404 + spin_lock_irqsave(&rdev->pif_idx_lock, flags); 2387 2405 WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); 2388 2406 WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); 2407 + spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); 2389 2408 } 2390 2409 2391 2410 static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) 2392 2411 { 2412 + unsigned long flags; 2393 2413 u32 r; 2394 2414 2415 + spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 2395 2416 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2396 2417 r = RREG32(R600_UVD_CTX_DATA); 2418 + spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 2397 2419 return r; 2398 2420 } 2399 2421 2400 2422 static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2401 2423 { 2424 + unsigned long flags; 2425 + 2426 + spin_lock_irqsave(&rdev->uvd_idx_lock, flags); 2402 2427 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 2403 2428 WREG32(R600_UVD_CTX_DATA, (v)); 2429 + spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); 2404 2430 } 2405 2431 2406 2432 2407 2433 static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) 2408 2434 { 2435 + unsigned long flags; 2409 2436 u32 r; 2410 2437 2438 + spin_lock_irqsave(&rdev->didt_idx_lock, flags); 2411 2439 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2412 2440 r = RREG32(CIK_DIDT_IND_DATA); 2441 + spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); 2413 2442 return r; 2414 2443 } 2415 2444 2416 2445 static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2417 2446 { 2447 + unsigned long flags; 2448 + 2449 + spin_lock_irqsave(&rdev->didt_idx_lock, flags); 
2418 2450 WREG32(CIK_DIDT_IND_INDEX, (reg)); 2419 2451 WREG32(CIK_DIDT_IND_DATA, (v)); 2452 + spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); 2420 2453 } 2421 2454 2422 2455 void r100_pll_errata_after_index(struct radeon_device *rdev); ··· 2648 2569 #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) 2649 2570 #define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) 2650 2571 #define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) 2572 + #define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e)) 2651 2573 2652 2574 /* Common functions */ 2653 2575 /* AGP */
+8 -3
drivers/gpu/drm/radeon/radeon_asic.c
··· 1037 1037 .set_pcie_lanes = &r600_set_pcie_lanes, 1038 1038 .set_clock_gating = NULL, 1039 1039 .get_temperature = &rv6xx_get_temp, 1040 + .set_uvd_clocks = &r600_set_uvd_clocks, 1040 1041 }, 1041 1042 .dpm = { 1042 1043 .init = &rv6xx_dpm_init, ··· 1127 1126 .set_pcie_lanes = NULL, 1128 1127 .set_clock_gating = NULL, 1129 1128 .get_temperature = &rv6xx_get_temp, 1129 + .set_uvd_clocks = &r600_set_uvd_clocks, 1130 1130 }, 1131 1131 .dpm = { 1132 1132 .init = &rs780_dpm_init, ··· 1143 1141 .get_mclk = &rs780_dpm_get_mclk, 1144 1142 .print_power_state = &rs780_dpm_print_power_state, 1145 1143 .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, 1144 + .force_performance_level = &rs780_dpm_force_performance_level, 1146 1145 }, 1147 1146 .pflip = { 1148 1147 .pre_page_flip = &rs600_pre_page_flip, ··· 1794 1791 .print_power_state = &trinity_dpm_print_power_state, 1795 1792 .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, 1796 1793 .force_performance_level = &trinity_dpm_force_performance_level, 1794 + .enable_bapm = &trinity_dpm_enable_bapm, 1797 1795 }, 1798 1796 .pflip = { 1799 1797 .pre_page_flip = &evergreen_pre_page_flip, ··· 2170 2166 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, 2171 2167 .force_performance_level = &kv_dpm_force_performance_level, 2172 2168 .powergate_uvd = &kv_dpm_powergate_uvd, 2169 + .enable_bapm = &kv_dpm_enable_bapm, 2173 2170 }, 2174 2171 .pflip = { 2175 2172 .pre_page_flip = &evergreen_pre_page_flip, ··· 2395 2390 RADEON_CG_SUPPORT_HDP_LS | 2396 2391 RADEON_CG_SUPPORT_HDP_MGCG; 2397 2392 rdev->pg_flags = 0 | 2398 - /*RADEON_PG_SUPPORT_GFX_CG | */ 2393 + /*RADEON_PG_SUPPORT_GFX_PG | */ 2399 2394 RADEON_PG_SUPPORT_SDMA; 2400 2395 break; 2401 2396 case CHIP_OLAND: ··· 2484 2479 RADEON_CG_SUPPORT_HDP_LS | 2485 2480 RADEON_CG_SUPPORT_HDP_MGCG; 2486 2481 rdev->pg_flags = 0; 2487 - /*RADEON_PG_SUPPORT_GFX_CG 
| 2482 + /*RADEON_PG_SUPPORT_GFX_PG | 2488 2483 RADEON_PG_SUPPORT_GFX_SMG | 2489 2484 RADEON_PG_SUPPORT_GFX_DMG | 2490 2485 RADEON_PG_SUPPORT_UVD | ··· 2512 2507 RADEON_CG_SUPPORT_HDP_LS | 2513 2508 RADEON_CG_SUPPORT_HDP_MGCG; 2514 2509 rdev->pg_flags = 0; 2515 - /*RADEON_PG_SUPPORT_GFX_CG | 2510 + /*RADEON_PG_SUPPORT_GFX_PG | 2516 2511 RADEON_PG_SUPPORT_GFX_SMG | 2517 2512 RADEON_PG_SUPPORT_UVD | 2518 2513 RADEON_PG_SUPPORT_VCE |
+5
drivers/gpu/drm/radeon/radeon_asic.h
··· 389 389 u32 r600_get_xclk(struct radeon_device *rdev); 390 390 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); 391 391 int rv6xx_get_temp(struct radeon_device *rdev); 392 + int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 392 393 int r600_dpm_pre_set_power_state(struct radeon_device *rdev); 393 394 void r600_dpm_post_set_power_state(struct radeon_device *rdev); 394 395 /* r600 dma */ ··· 429 428 struct radeon_ps *ps); 430 429 void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 431 430 struct seq_file *m); 431 + int rs780_dpm_force_performance_level(struct radeon_device *rdev, 432 + enum radeon_dpm_forced_level level); 432 433 433 434 /* 434 435 * rv770,rv730,rv710,rv740 ··· 628 625 struct seq_file *m); 629 626 int trinity_dpm_force_performance_level(struct radeon_device *rdev, 630 627 enum radeon_dpm_forced_level level); 628 + void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable); 631 629 632 630 /* DCE6 - SI */ 633 631 void dce6_bandwidth_update(struct radeon_device *rdev); ··· 785 781 int kv_dpm_force_performance_level(struct radeon_device *rdev, 786 782 enum radeon_dpm_forced_level level); 787 783 void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); 784 + void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable); 788 785 789 786 /* uvd v1.0 */ 790 787 uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+65 -4
drivers/gpu/drm/radeon/radeon_connectors.c
··· 396 396 } 397 397 } 398 398 399 + if (property == rdev->mode_info.audio_property) { 400 + struct radeon_connector *radeon_connector = to_radeon_connector(connector); 401 + /* need to find digital encoder on connector */ 402 + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 403 + if (!encoder) 404 + return 0; 405 + 406 + radeon_encoder = to_radeon_encoder(encoder); 407 + 408 + if (radeon_connector->audio != val) { 409 + radeon_connector->audio = val; 410 + radeon_property_change_mode(&radeon_encoder->base); 411 + } 412 + } 413 + 399 414 if (property == rdev->mode_info.underscan_property) { 400 415 /* need to find digital encoder on connector */ 401 416 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); ··· 1435 1420 if (radeon_dp_getdpcd(radeon_connector)) 1436 1421 ret = connector_status_connected; 1437 1422 } else { 1438 - /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */ 1423 + /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */ 1439 1424 if (radeon_ddc_probe(radeon_connector, false)) 1440 1425 ret = connector_status_connected; 1441 1426 } ··· 1500 1485 .detect = radeon_dp_detect, 1501 1486 .fill_modes = drm_helper_probe_single_connector_modes, 1502 1487 .set_property = radeon_connector_set_property, 1488 + .destroy = radeon_dp_connector_destroy, 1489 + .force = radeon_dvi_force, 1490 + }; 1491 + 1492 + static const struct drm_connector_funcs radeon_edp_connector_funcs = { 1493 + .dpms = drm_helper_connector_dpms, 1494 + .detect = radeon_dp_detect, 1495 + .fill_modes = drm_helper_probe_single_connector_modes, 1496 + .set_property = radeon_lvds_set_property, 1497 + .destroy = radeon_dp_connector_destroy, 1498 + .force = radeon_dvi_force, 1499 + }; 1500 + 1501 + static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = { 1502 + .dpms = drm_helper_connector_dpms, 1503 + .detect = radeon_dp_detect, 1504 + .fill_modes = drm_helper_probe_single_connector_modes, 1505 + .set_property = radeon_lvds_set_property, 1503 
1506 .destroy = radeon_dp_connector_destroy, 1504 1507 .force = radeon_dvi_force, 1505 1508 }; ··· 1613 1580 goto failed; 1614 1581 radeon_dig_connector->igp_lane_info = igp_lane_info; 1615 1582 radeon_connector->con_priv = radeon_dig_connector; 1616 - drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1617 - drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1618 1583 if (i2c_bus->valid) { 1619 1584 /* add DP i2c bus */ 1620 1585 if (connector_type == DRM_MODE_CONNECTOR_eDP) ··· 1629 1598 case DRM_MODE_CONNECTOR_VGA: 1630 1599 case DRM_MODE_CONNECTOR_DVIA: 1631 1600 default: 1601 + drm_connector_init(dev, &radeon_connector->base, 1602 + &radeon_dp_connector_funcs, connector_type); 1603 + drm_connector_helper_add(&radeon_connector->base, 1604 + &radeon_dp_connector_helper_funcs); 1632 1605 connector->interlace_allowed = true; 1633 1606 connector->doublescan_allowed = true; 1634 1607 radeon_connector->dac_load_detect = true; ··· 1645 1610 case DRM_MODE_CONNECTOR_HDMIA: 1646 1611 case DRM_MODE_CONNECTOR_HDMIB: 1647 1612 case DRM_MODE_CONNECTOR_DisplayPort: 1613 + drm_connector_init(dev, &radeon_connector->base, 1614 + &radeon_dp_connector_funcs, connector_type); 1615 + drm_connector_helper_add(&radeon_connector->base, 1616 + &radeon_dp_connector_helper_funcs); 1648 1617 drm_object_attach_property(&radeon_connector->base.base, 1649 1618 rdev->mode_info.underscan_property, 1650 1619 UNDERSCAN_OFF); ··· 1658 1619 drm_object_attach_property(&radeon_connector->base.base, 1659 1620 rdev->mode_info.underscan_vborder_property, 1660 1621 0); 1622 + drm_object_attach_property(&radeon_connector->base.base, 1623 + rdev->mode_info.audio_property, 1624 + RADEON_AUDIO_DISABLE); 1661 1625 subpixel_order = SubPixelHorizontalRGB; 1662 1626 connector->interlace_allowed = true; 1663 1627 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) ··· 1676 1634 break; 1677 1635 case DRM_MODE_CONNECTOR_LVDS: 1678 1636 case 
DRM_MODE_CONNECTOR_eDP: 1637 + drm_connector_init(dev, &radeon_connector->base, 1638 + &radeon_lvds_bridge_connector_funcs, connector_type); 1639 + drm_connector_helper_add(&radeon_connector->base, 1640 + &radeon_dp_connector_helper_funcs); 1679 1641 drm_object_attach_property(&radeon_connector->base.base, 1680 1642 dev->mode_config.scaling_mode_property, 1681 1643 DRM_MODE_SCALE_FULLSCREEN); ··· 1754 1708 rdev->mode_info.underscan_vborder_property, 1755 1709 0); 1756 1710 } 1711 + if (ASIC_IS_DCE2(rdev)) { 1712 + drm_object_attach_property(&radeon_connector->base.base, 1713 + rdev->mode_info.audio_property, 1714 + RADEON_AUDIO_DISABLE); 1715 + } 1757 1716 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1758 1717 radeon_connector->dac_load_detect = true; 1759 1718 drm_object_attach_property(&radeon_connector->base.base, ··· 1799 1748 rdev->mode_info.underscan_vborder_property, 1800 1749 0); 1801 1750 } 1751 + if (ASIC_IS_DCE2(rdev)) { 1752 + drm_object_attach_property(&radeon_connector->base.base, 1753 + rdev->mode_info.audio_property, 1754 + RADEON_AUDIO_DISABLE); 1755 + } 1802 1756 subpixel_order = SubPixelHorizontalRGB; 1803 1757 connector->interlace_allowed = true; 1804 1758 if (connector_type == DRM_MODE_CONNECTOR_HDMIB) ··· 1843 1787 rdev->mode_info.underscan_vborder_property, 1844 1788 0); 1845 1789 } 1790 + if (ASIC_IS_DCE2(rdev)) { 1791 + drm_object_attach_property(&radeon_connector->base.base, 1792 + rdev->mode_info.audio_property, 1793 + RADEON_AUDIO_DISABLE); 1794 + } 1846 1795 connector->interlace_allowed = true; 1847 1796 /* in theory with a DP to VGA converter... 
*/ 1848 1797 connector->doublescan_allowed = false; ··· 1858 1797 goto failed; 1859 1798 radeon_dig_connector->igp_lane_info = igp_lane_info; 1860 1799 radeon_connector->con_priv = radeon_dig_connector; 1861 - drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1800 + drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type); 1862 1801 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1863 1802 if (i2c_bus->valid) { 1864 1803 /* add DP i2c bus */
+8 -3
drivers/gpu/drm/radeon/radeon_cs.c
··· 28 28 #include <drm/radeon_drm.h> 29 29 #include "radeon_reg.h" 30 30 #include "radeon.h" 31 + #include "radeon_trace.h" 31 32 32 33 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 33 34 { ··· 81 80 p->relocs[i].lobj.bo = p->relocs[i].robj; 82 81 p->relocs[i].lobj.written = !!r->write_domain; 83 82 84 - /* the first reloc of an UVD job is the 85 - msg and that must be in VRAM */ 86 - if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) { 83 + /* the first reloc of an UVD job is the msg and that must be in 84 + VRAM, also but everything into VRAM on AGP cards to avoid 85 + image corruptions */ 86 + if (p->ring == R600_RING_TYPE_UVD_INDEX && 87 + (i == 0 || p->rdev->flags & RADEON_IS_AGP)) { 87 88 /* TODO: is this still needed for NI+ ? */ 88 89 p->relocs[i].lobj.domain = 89 90 RADEON_GEM_DOMAIN_VRAM; ··· 561 558 r = radeon_cs_handle_lockup(rdev, r); 562 559 return r; 563 560 } 561 + 562 + trace_radeon_cs(&parser); 564 563 565 564 r = radeon_cs_ib_chunk(rdev, &parser); 566 565 if (r) {
+11
drivers/gpu/drm/radeon/radeon_device.c
··· 1249 1249 /* Registers mapping */ 1250 1250 /* TODO: block userspace mapping of io register */ 1251 1251 spin_lock_init(&rdev->mmio_idx_lock); 1252 + spin_lock_init(&rdev->smc_idx_lock); 1253 + spin_lock_init(&rdev->pll_idx_lock); 1254 + spin_lock_init(&rdev->mc_idx_lock); 1255 + spin_lock_init(&rdev->pcie_idx_lock); 1256 + spin_lock_init(&rdev->pciep_idx_lock); 1257 + spin_lock_init(&rdev->pif_idx_lock); 1258 + spin_lock_init(&rdev->cg_idx_lock); 1259 + spin_lock_init(&rdev->uvd_idx_lock); 1260 + spin_lock_init(&rdev->rcu_idx_lock); 1261 + spin_lock_init(&rdev->didt_idx_lock); 1262 + spin_lock_init(&rdev->end_idx_lock); 1252 1263 if (rdev->family >= CHIP_BONAIRE) { 1253 1264 rdev->rmmio_base = pci_resource_start(rdev->pdev, 5); 1254 1265 rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
+12
drivers/gpu/drm/radeon/radeon_display.c
··· 1172 1172 { UNDERSCAN_AUTO, "auto" }, 1173 1173 }; 1174 1174 1175 + static struct drm_prop_enum_list radeon_audio_enum_list[] = 1176 + { { RADEON_AUDIO_DISABLE, "off" }, 1177 + { RADEON_AUDIO_ENABLE, "on" }, 1178 + { RADEON_AUDIO_AUTO, "auto" }, 1179 + }; 1180 + 1175 1181 static int radeon_modeset_create_props(struct radeon_device *rdev) 1176 1182 { 1177 1183 int sz; ··· 1227 1221 "underscan vborder", 0, 128); 1228 1222 if (!rdev->mode_info.underscan_vborder_property) 1229 1223 return -ENOMEM; 1224 + 1225 + sz = ARRAY_SIZE(radeon_audio_enum_list); 1226 + rdev->mode_info.audio_property = 1227 + drm_property_create_enum(rdev->ddev, 0, 1228 + "audio", 1229 + radeon_audio_enum_list, sz); 1230 1230 1231 1231 return 0; 1232 1232 }
+1 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 153 153 int radeon_testing = 0; 154 154 int radeon_connector_table = 0; 155 155 int radeon_tv = 1; 156 - int radeon_audio = 0; 156 + int radeon_audio = 1; 157 157 int radeon_disp_priority = 0; 158 158 int radeon_hw_i2c = 0; 159 159 int radeon_pcie_gen2 = -1;
+9
drivers/gpu/drm/radeon/radeon_mode.h
··· 247 247 struct drm_property *underscan_property; 248 248 struct drm_property *underscan_hborder_property; 249 249 struct drm_property *underscan_vborder_property; 250 + /* audio */ 251 + struct drm_property *audio_property; 250 252 /* hardcoded DFP edid from BIOS */ 251 253 struct edid *bios_hardcoded_edid; 252 254 int bios_hardcoded_edid_size; ··· 473 471 u8 cd_mux_state; 474 472 }; 475 473 474 + enum radeon_connector_audio { 475 + RADEON_AUDIO_DISABLE = 0, 476 + RADEON_AUDIO_ENABLE = 1, 477 + RADEON_AUDIO_AUTO = 2 478 + }; 479 + 476 480 struct radeon_connector { 477 481 struct drm_connector base; 478 482 uint32_t connector_id; ··· 497 489 struct radeon_hpd hpd; 498 490 struct radeon_router router; 499 491 struct radeon_i2c_chan *router_bus; 492 + enum radeon_connector_audio audio; 500 493 }; 501 494 502 495 struct radeon_framebuffer {
+66 -15
drivers/gpu/drm/radeon/radeon_pm.c
··· 67 67 68 68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 69 69 { 70 - if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 70 + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 71 + mutex_lock(&rdev->pm.mutex); 72 + if (power_supply_is_system_supplied() > 0) 73 + rdev->pm.dpm.ac_power = true; 74 + else 75 + rdev->pm.dpm.ac_power = false; 76 + if (rdev->asic->dpm.enable_bapm) 77 + radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); 78 + mutex_unlock(&rdev->pm.mutex); 79 + } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 71 80 if (rdev->pm.profile == PM_PROFILE_AUTO) { 72 81 mutex_lock(&rdev->pm.mutex); 73 82 radeon_pm_update_profile(rdev); ··· 342 333 struct device_attribute *attr, 343 334 char *buf) 344 335 { 345 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 336 + struct drm_device *ddev = dev_get_drvdata(dev); 346 337 struct radeon_device *rdev = ddev->dev_private; 347 338 int cp = rdev->pm.profile; 348 339 ··· 358 349 const char *buf, 359 350 size_t count) 360 351 { 361 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 352 + struct drm_device *ddev = dev_get_drvdata(dev); 362 353 struct radeon_device *rdev = ddev->dev_private; 363 354 364 355 mutex_lock(&rdev->pm.mutex); ··· 392 383 struct device_attribute *attr, 393 384 char *buf) 394 385 { 395 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 386 + struct drm_device *ddev = dev_get_drvdata(dev); 396 387 struct radeon_device *rdev = ddev->dev_private; 397 388 int pm = rdev->pm.pm_method; 398 389 ··· 406 397 const char *buf, 407 398 size_t count) 408 399 { 409 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 400 + struct drm_device *ddev = dev_get_drvdata(dev); 410 401 struct radeon_device *rdev = ddev->dev_private; 411 402 412 403 /* we don't support the legacy modes with dpm */ ··· 442 433 struct device_attribute *attr, 443 434 char *buf) 444 435 { 445 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 436 + 
struct drm_device *ddev = dev_get_drvdata(dev); 446 437 struct radeon_device *rdev = ddev->dev_private; 447 438 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 448 439 ··· 456 447 const char *buf, 457 448 size_t count) 458 449 { 459 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 450 + struct drm_device *ddev = dev_get_drvdata(dev); 460 451 struct radeon_device *rdev = ddev->dev_private; 461 452 462 453 mutex_lock(&rdev->pm.mutex); ··· 481 472 struct device_attribute *attr, 482 473 char *buf) 483 474 { 484 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 475 + struct drm_device *ddev = dev_get_drvdata(dev); 485 476 struct radeon_device *rdev = ddev->dev_private; 486 477 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 487 478 ··· 495 486 const char *buf, 496 487 size_t count) 497 488 { 498 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 489 + struct drm_device *ddev = dev_get_drvdata(dev); 499 490 struct radeon_device *rdev = ddev->dev_private; 500 491 enum radeon_dpm_forced_level level; 501 492 int ret = 0; ··· 533 524 struct device_attribute *attr, 534 525 char *buf) 535 526 { 536 - struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); 527 + struct drm_device *ddev = dev_get_drvdata(dev); 537 528 struct radeon_device *rdev = ddev->dev_private; 538 529 int temp; 539 530 ··· 541 532 temp = radeon_get_temperature(rdev); 542 533 else 543 534 temp = 0; 535 + 536 + return snprintf(buf, PAGE_SIZE, "%d\n", temp); 537 + } 538 + 539 + static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, 540 + struct device_attribute *attr, 541 + char *buf) 542 + { 543 + struct drm_device *ddev = dev_get_drvdata(dev); 544 + struct radeon_device *rdev = ddev->dev_private; 545 + int hyst = to_sensor_dev_attr(attr)->index; 546 + int temp; 547 + 548 + if (hyst) 549 + temp = rdev->pm.dpm.thermal.min_temp; 550 + else 551 + temp = rdev->pm.dpm.thermal.max_temp; 544 552 545 553 return snprintf(buf, PAGE_SIZE, "%d\n", 
temp); 546 554 } ··· 570 544 } 571 545 572 546 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 547 + static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 548 + static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 573 549 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); 574 550 575 551 static struct attribute *hwmon_attributes[] = { 576 552 &sensor_dev_attr_temp1_input.dev_attr.attr, 553 + &sensor_dev_attr_temp1_crit.dev_attr.attr, 554 + &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 577 555 &sensor_dev_attr_name.dev_attr.attr, 578 556 NULL 579 557 }; 580 558 559 + static umode_t hwmon_attributes_visible(struct kobject *kobj, 560 + struct attribute *attr, int index) 561 + { 562 + struct device *dev = container_of(kobj, struct device, kobj); 563 + struct drm_device *ddev = dev_get_drvdata(dev); 564 + struct radeon_device *rdev = ddev->dev_private; 565 + 566 + /* Skip limit attributes if DPM is not enabled */ 567 + if (rdev->pm.pm_method != PM_METHOD_DPM && 568 + (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 569 + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 570 + return 0; 571 + 572 + return attr->mode; 573 + } 574 + 581 575 static const struct attribute_group hwmon_attrgroup = { 582 576 .attrs = hwmon_attributes, 577 + .is_visible = hwmon_attributes_visible, 583 578 }; 584 579 585 580 static int radeon_hwmon_init(struct radeon_device *rdev) ··· 917 870 918 871 radeon_dpm_post_set_power_state(rdev); 919 872 920 - /* force low perf level for thermal */ 921 - if (rdev->pm.dpm.thermal_active && 922 - rdev->asic->dpm.force_performance_level) { 923 - radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 873 + if (rdev->asic->dpm.force_performance_level) { 874 + if (rdev->pm.dpm.thermal_active) 875 + /* force low perf level for thermal */ 876 + radeon_dpm_force_performance_level(rdev, 
RADEON_DPM_FORCED_LEVEL_LOW); 877 + else 878 + /* otherwise, enable auto */ 879 + radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 924 880 } 925 881 926 882 done: ··· 1152 1102 { 1153 1103 int ret; 1154 1104 1155 - /* default to performance state */ 1105 + /* default to balanced state */ 1156 1106 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1157 1107 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1108 + rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1158 1109 rdev->pm.default_sclk = rdev->clock.default_sclk; 1159 1110 rdev->pm.default_mclk = rdev->clock.default_mclk; 1160 1111 rdev->pm.current_sclk = rdev->clock.default_sclk;
+20 -7
drivers/gpu/drm/radeon/radeon_trace.h
··· 27 27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 28 28 ); 29 29 30 + TRACE_EVENT(radeon_cs, 31 + TP_PROTO(struct radeon_cs_parser *p), 32 + TP_ARGS(p), 33 + TP_STRUCT__entry( 34 + __field(u32, ring) 35 + __field(u32, dw) 36 + __field(u32, fences) 37 + ), 38 + 39 + TP_fast_assign( 40 + __entry->ring = p->ring; 41 + __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; 42 + __entry->fences = radeon_fence_count_emitted( 43 + p->rdev, p->ring); 44 + ), 45 + TP_printk("ring=%u, dw=%u, fences=%u", 46 + __entry->ring, __entry->dw, 47 + __entry->fences) 48 + ); 49 + 30 50 DECLARE_EVENT_CLASS(radeon_fence_request, 31 51 32 52 TP_PROTO(struct drm_device *dev, u32 seqno), ··· 67 47 ); 68 48 69 49 DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, 70 - 71 - TP_PROTO(struct drm_device *dev, u32 seqno), 72 - 73 - TP_ARGS(dev, seqno) 74 - ); 75 - 76 - DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, 77 50 78 51 TP_PROTO(struct drm_device *dev, u32 seqno), 79 52
+7
drivers/gpu/drm/radeon/rs400.c
··· 274 274 275 275 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 276 276 { 277 + unsigned long flags; 277 278 uint32_t r; 278 279 280 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 279 281 WREG32(RS480_NB_MC_INDEX, reg & 0xff); 280 282 r = RREG32(RS480_NB_MC_DATA); 281 283 WREG32(RS480_NB_MC_INDEX, 0xff); 284 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 282 285 return r; 283 286 } 284 287 285 288 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 286 289 { 290 + unsigned long flags; 291 + 292 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 287 293 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN); 288 294 WREG32(RS480_NB_MC_DATA, (v)); 289 295 WREG32(RS480_NB_MC_INDEX, 0xff); 296 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 290 297 } 291 298 292 299 #if defined(CONFIG_DEBUG_FS)
+11 -1
drivers/gpu/drm/radeon/rs600.c
··· 847 847 848 848 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 849 849 { 850 + unsigned long flags; 851 + u32 r; 852 + 853 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 850 854 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 851 855 S_000070_MC_IND_CITF_ARB0(1)); 852 - return RREG32(R_000074_MC_IND_DATA); 856 + r = RREG32(R_000074_MC_IND_DATA); 857 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 858 + return r; 853 859 } 854 860 855 861 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 856 862 { 863 + unsigned long flags; 864 + 865 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 857 866 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | 858 867 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); 859 868 WREG32(R_000074_MC_IND_DATA, v); 869 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 860 870 } 861 871 862 872 static void rs600_debugfs(struct radeon_device *rdev)
+7
drivers/gpu/drm/radeon/rs690.c
··· 631 631 632 632 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 633 633 { 634 + unsigned long flags; 634 635 uint32_t r; 635 636 637 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 636 638 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); 637 639 r = RREG32(R_00007C_MC_DATA); 638 640 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); 641 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 639 642 return r; 640 643 } 641 644 642 645 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 643 646 { 647 + unsigned long flags; 648 + 649 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 644 650 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | 645 651 S_000078_MC_IND_WR_EN(1)); 646 652 WREG32(R_00007C_MC_DATA, v); 647 653 WREG32(R_000078_MC_INDEX, 0x7F); 654 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 648 655 } 649 656 650 657 static void rs690_mc_program(struct radeon_device *rdev)
+92 -20
drivers/gpu/drm/radeon/rs780_dpm.c
··· 62 62 radeon_crtc = to_radeon_crtc(crtc); 63 63 pi->crtc_id = radeon_crtc->crtc_id; 64 64 if (crtc->mode.htotal && crtc->mode.vtotal) 65 - pi->refresh_rate = 66 - (crtc->mode.clock * 1000) / 67 - (crtc->mode.htotal * crtc->mode.vtotal); 65 + pi->refresh_rate = drm_mode_vrefresh(&crtc->mode); 68 66 break; 69 67 } 70 68 } ··· 374 376 WREG32_P(CG_INTGFX_MISC, 0, ~0xFFF00000); 375 377 } 376 378 377 - static void rs780_force_voltage_to_high(struct radeon_device *rdev) 379 + static void rs780_force_voltage(struct radeon_device *rdev, u16 voltage) 378 380 { 379 - struct igp_power_info *pi = rs780_get_pi(rdev); 380 381 struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 381 382 382 383 if ((current_state->max_voltage == RS780_VDDC_LEVEL_HIGH) && ··· 387 390 udelay(1); 388 391 389 392 WREG32_P(FVTHROT_PWM_CTRL_REG0, 390 - STARTING_PWM_HIGHTIME(pi->max_voltage), 393 + STARTING_PWM_HIGHTIME(voltage), 391 394 ~STARTING_PWM_HIGHTIME_MASK); 392 395 393 396 WREG32_P(FVTHROT_PWM_CTRL_REG0, ··· 397 400 ~RANGE_PWM_FEEDBACK_DIV_EN); 398 401 399 402 udelay(1); 403 + 404 + WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 405 + } 406 + 407 + static void rs780_force_fbdiv(struct radeon_device *rdev, u32 fb_div) 408 + { 409 + struct igp_ps *current_state = rs780_get_ps(rdev->pm.dpm.current_ps); 410 + 411 + if (current_state->sclk_low == current_state->sclk_high) 412 + return; 413 + 414 + WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); 415 + 416 + WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(fb_div), 417 + ~FORCED_FEEDBACK_DIV_MASK); 418 + WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(fb_div), 419 + ~STARTING_FEEDBACK_DIV_MASK); 420 + WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); 421 + 422 + udelay(100); 400 423 401 424 WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 402 425 } ··· 449 432 if (ret) 450 433 return ret; 451 434 452 - WREG32_P(GFX_MACRO_BYPASS_CNTL, SPLL_BYPASS_CNTL, ~SPLL_BYPASS_CNTL); 
435 + if ((min_dividers.ref_div != max_dividers.ref_div) || 436 + (min_dividers.post_div != max_dividers.post_div) || 437 + (max_dividers.ref_div != current_max_dividers.ref_div) || 438 + (max_dividers.post_div != current_max_dividers.post_div)) 439 + return -EINVAL; 453 440 454 - WREG32_P(FVTHROT_FBDIV_REG2, FORCED_FEEDBACK_DIV(max_dividers.fb_div), 455 - ~FORCED_FEEDBACK_DIV_MASK); 456 - WREG32_P(FVTHROT_FBDIV_REG1, STARTING_FEEDBACK_DIV(max_dividers.fb_div), 457 - ~STARTING_FEEDBACK_DIV_MASK); 458 - WREG32_P(FVTHROT_FBDIV_REG1, FORCE_FEEDBACK_DIV, ~FORCE_FEEDBACK_DIV); 459 - 460 - udelay(100); 461 - 462 - WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~SPLL_BYPASS_CNTL); 441 + rs780_force_fbdiv(rdev, max_dividers.fb_div); 463 442 464 443 if (max_dividers.fb_div > min_dividers.fb_div) { 465 444 WREG32_P(FVTHROT_FBDIV_REG0, ··· 497 484 498 485 if ((new_state->sclk_high == old_state->sclk_high) && 499 486 (new_state->sclk_low == old_state->sclk_low)) 487 + return; 488 + 489 + if (new_state->sclk_high == new_state->sclk_low) 500 490 return; 501 491 502 492 rs780_clk_scaling_enable(rdev, true); ··· 665 649 rs780_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 666 650 667 651 if (pi->voltage_control) { 668 - rs780_force_voltage_to_high(rdev); 652 + rs780_force_voltage(rdev, pi->max_voltage); 669 653 mdelay(5); 670 654 } 671 655 ··· 733 717 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 734 718 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 735 719 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 736 - } else if (r600_is_uvd_state(rps->class, rps->class2)) { 737 - rps->vclk = RS780_DEFAULT_VCLK_FREQ; 738 - rps->dclk = RS780_DEFAULT_DCLK_FREQ; 739 720 } else { 740 721 rps->vclk = 0; 741 722 rps->dclk = 0; 723 + } 724 + 725 + if (r600_is_uvd_state(rps->class, rps->class2)) { 726 + if ((rps->vclk == 0) || (rps->dclk == 0)) { 727 + rps->vclk = RS780_DEFAULT_VCLK_FREQ; 728 + rps->dclk = RS780_DEFAULT_DCLK_FREQ; 729 + } 742 730 } 743 731 744 732 if (rps->class & 
ATOM_PPLIB_CLASSIFICATION_BOOT) ··· 1005 985 else 1006 986 seq_printf(m, "power level 1 sclk: %u vddc_index: %d\n", 1007 987 ps->sclk_high, ps->max_voltage); 988 + } 989 + 990 + int rs780_dpm_force_performance_level(struct radeon_device *rdev, 991 + enum radeon_dpm_forced_level level) 992 + { 993 + struct igp_power_info *pi = rs780_get_pi(rdev); 994 + struct radeon_ps *rps = rdev->pm.dpm.current_ps; 995 + struct igp_ps *ps = rs780_get_ps(rps); 996 + struct atom_clock_dividers dividers; 997 + int ret; 998 + 999 + rs780_clk_scaling_enable(rdev, false); 1000 + rs780_voltage_scaling_enable(rdev, false); 1001 + 1002 + if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 1003 + if (pi->voltage_control) 1004 + rs780_force_voltage(rdev, pi->max_voltage); 1005 + 1006 + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 1007 + ps->sclk_high, false, &dividers); 1008 + if (ret) 1009 + return ret; 1010 + 1011 + rs780_force_fbdiv(rdev, dividers.fb_div); 1012 + } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 1013 + ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, 1014 + ps->sclk_low, false, &dividers); 1015 + if (ret) 1016 + return ret; 1017 + 1018 + rs780_force_fbdiv(rdev, dividers.fb_div); 1019 + 1020 + if (pi->voltage_control) 1021 + rs780_force_voltage(rdev, pi->min_voltage); 1022 + } else { 1023 + if (pi->voltage_control) 1024 + rs780_force_voltage(rdev, pi->max_voltage); 1025 + 1026 + if (ps->sclk_high != ps->sclk_low) { 1027 + WREG32_P(FVTHROT_FBDIV_REG1, 0, ~FORCE_FEEDBACK_DIV); 1028 + rs780_clk_scaling_enable(rdev, true); 1029 + } 1030 + 1031 + if (pi->voltage_control) { 1032 + rs780_voltage_scaling_enable(rdev, true); 1033 + rs780_enable_voltage_scaling(rdev, rps); 1034 + } 1035 + } 1036 + 1037 + rdev->pm.dpm.forced_level = level; 1038 + 1039 + return 0; 1008 1040 }
+8
drivers/gpu/drm/radeon/rv515.c
··· 209 209 210 210 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 211 211 { 212 + unsigned long flags; 212 213 uint32_t r; 213 214 215 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 214 216 WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); 215 217 r = RREG32(MC_IND_DATA); 216 218 WREG32(MC_IND_INDEX, 0); 219 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 220 + 217 221 return r; 218 222 } 219 223 220 224 void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 221 225 { 226 + unsigned long flags; 227 + 228 + spin_lock_irqsave(&rdev->mc_idx_lock, flags); 222 229 WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); 223 230 WREG32(MC_IND_DATA, (v)); 224 231 WREG32(MC_IND_INDEX, 0); 232 + spin_unlock_irqrestore(&rdev->mc_idx_lock, flags); 225 233 } 226 234 227 235 #if defined(CONFIG_DEBUG_FS)
-2
drivers/gpu/drm/radeon/rv6xx_dpm.c
··· 1758 1758 1759 1759 rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1760 1760 1761 - rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1762 - 1763 1761 return 0; 1764 1762 } 1765 1763
+7 -9
drivers/gpu/drm/radeon/rv770_dpm.c
··· 2064 2064 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps); 2065 2065 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 2066 2066 2067 - ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 2068 - if (ret) { 2069 - DRM_ERROR("rv770_dpm_force_performance_level failed\n"); 2070 - return ret; 2071 - } 2072 - 2073 2067 return 0; 2074 2068 } 2075 2069 ··· 2141 2147 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 2142 2148 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 2143 2149 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 2144 - } else if (r600_is_uvd_state(rps->class, rps->class2)) { 2145 - rps->vclk = RV770_DEFAULT_VCLK_FREQ; 2146 - rps->dclk = RV770_DEFAULT_DCLK_FREQ; 2147 2150 } else { 2148 2151 rps->vclk = 0; 2149 2152 rps->dclk = 0; 2153 + } 2154 + 2155 + if (r600_is_uvd_state(rps->class, rps->class2)) { 2156 + if ((rps->vclk == 0) || (rps->dclk == 0)) { 2157 + rps->vclk = RV770_DEFAULT_VCLK_FREQ; 2158 + rps->dclk = RV770_DEFAULT_DCLK_FREQ; 2159 + } 2150 2160 } 2151 2161 2152 2162 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+27 -17
drivers/gpu/drm/radeon/rv770_smc.c
··· 274 274 0x08, 0x72, 0x08, 0x72 275 275 }; 276 276 277 - int rv770_set_smc_sram_address(struct radeon_device *rdev, 278 - u16 smc_address, u16 limit) 277 + static int rv770_set_smc_sram_address(struct radeon_device *rdev, 278 + u16 smc_address, u16 limit) 279 279 { 280 280 u32 addr; 281 281 ··· 296 296 u16 smc_start_address, const u8 *src, 297 297 u16 byte_count, u16 limit) 298 298 { 299 + unsigned long flags; 299 300 u32 data, original_data, extra_shift; 300 301 u16 addr; 301 - int ret; 302 + int ret = 0; 302 303 303 304 if (smc_start_address & 3) 304 305 return -EINVAL; ··· 308 307 309 308 addr = smc_start_address; 310 309 310 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 311 311 while (byte_count >= 4) { 312 312 /* SMC address space is BE */ 313 313 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 314 314 315 315 ret = rv770_set_smc_sram_address(rdev, addr, limit); 316 316 if (ret) 317 - return ret; 317 + goto done; 318 318 319 319 WREG32(SMC_SRAM_DATA, data); 320 320 ··· 330 328 331 329 ret = rv770_set_smc_sram_address(rdev, addr, limit); 332 330 if (ret) 333 - return ret; 331 + goto done; 334 332 335 333 original_data = RREG32(SMC_SRAM_DATA); 336 334 ··· 348 346 349 347 ret = rv770_set_smc_sram_address(rdev, addr, limit); 350 348 if (ret) 351 - return ret; 349 + goto done; 352 350 353 351 WREG32(SMC_SRAM_DATA, data); 354 352 } 355 353 356 - return 0; 354 + done: 355 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 356 + 357 + return ret; 357 358 } 358 359 359 360 static int rv770_program_interrupt_vectors(struct radeon_device *rdev, ··· 466 461 467 462 static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit) 468 463 { 464 + unsigned long flags; 469 465 u16 i; 470 466 467 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 471 468 for (i = 0; i < limit; i += 4) { 472 469 rv770_set_smc_sram_address(rdev, i, limit); 473 470 WREG32(SMC_SRAM_DATA, 0); 474 471 } 472 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 475 473 
} 476 474 477 475 int rv770_load_smc_ucode(struct radeon_device *rdev, ··· 603 595 int rv770_read_smc_sram_dword(struct radeon_device *rdev, 604 596 u16 smc_address, u32 *value, u16 limit) 605 597 { 598 + unsigned long flags; 606 599 int ret; 607 600 601 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 608 602 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 609 - if (ret) 610 - return ret; 603 + if (ret == 0) 604 + *value = RREG32(SMC_SRAM_DATA); 605 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 611 606 612 - *value = RREG32(SMC_SRAM_DATA); 613 - 614 - return 0; 607 + return ret; 615 608 } 616 609 617 610 int rv770_write_smc_sram_dword(struct radeon_device *rdev, 618 611 u16 smc_address, u32 value, u16 limit) 619 612 { 613 + unsigned long flags; 620 614 int ret; 621 615 616 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 622 617 ret = rv770_set_smc_sram_address(rdev, smc_address, limit); 623 - if (ret) 624 - return ret; 618 + if (ret == 0) 619 + WREG32(SMC_SRAM_DATA, value); 620 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 625 621 626 - WREG32(SMC_SRAM_DATA, value); 627 - 628 - return 0; 622 + return ret; 629 623 }
-2
drivers/gpu/drm/radeon/rv770_smc.h
··· 187 187 #define RV770_SMC_SOFT_REGISTER_uvd_enabled 0x9C 188 188 #define RV770_SMC_SOFT_REGISTER_is_asic_lombok 0xA0 189 189 190 - int rv770_set_smc_sram_address(struct radeon_device *rdev, 191 - u16 smc_address, u16 limit); 192 190 int rv770_copy_bytes_to_smc(struct radeon_device *rdev, 193 191 u16 smc_start_address, const u8 *src, 194 192 u16 byte_count, u16 limit);
+1 -1
drivers/gpu/drm/radeon/rv770d.h
··· 852 852 #define AFMT_VBI_PACKET_CONTROL 0x7608 853 853 # define AFMT_GENERIC0_UPDATE (1 << 2) 854 854 #define AFMT_INFOFRAME_CONTROL0 0x760c 855 - # define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */ 855 + # define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */ 856 856 # define AFMT_AUDIO_INFO_UPDATE (1 << 7) 857 857 # define AFMT_MPEG_INFO_UPDATE (1 << 10) 858 858 #define AFMT_GENERIC0_7 0x7610
+17 -4
drivers/gpu/drm/radeon/si.c
··· 83 83 uint64_t pe, 84 84 uint64_t addr, unsigned count, 85 85 uint32_t incr, uint32_t flags); 86 + static void si_enable_gui_idle_interrupt(struct radeon_device *rdev, 87 + bool enable); 86 88 87 89 static const u32 verde_rlc_save_restore_register_list[] = 88 90 { ··· 3388 3386 u32 rb_bufsz; 3389 3387 int r; 3390 3388 3389 + si_enable_gui_idle_interrupt(rdev, false); 3390 + 3391 3391 WREG32(CP_SEM_WAIT_TIMER, 0x0); 3392 3392 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 3393 3393 ··· 3504 3500 if (r) { 3505 3501 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 3506 3502 } 3503 + 3504 + si_enable_gui_idle_interrupt(rdev, true); 3507 3505 3508 3506 return 0; 3509 3507 } ··· 4894 4888 { 4895 4889 u32 tmp; 4896 4890 4897 - if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) { 4891 + if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) { 4898 4892 tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10); 4899 4893 WREG32(RLC_TTOP_D, tmp); 4900 4894 ··· 5256 5250 u32 block, bool enable) 5257 5251 { 5258 5252 if (block & RADEON_CG_BLOCK_GFX) { 5253 + si_enable_gui_idle_interrupt(rdev, false); 5259 5254 /* order matters! 
*/ 5260 5255 if (enable) { 5261 5256 si_enable_mgcg(rdev, true); ··· 5265 5258 si_enable_cgcg(rdev, false); 5266 5259 si_enable_mgcg(rdev, false); 5267 5260 } 5261 + si_enable_gui_idle_interrupt(rdev, true); 5268 5262 } 5269 5263 5270 5264 if (block & RADEON_CG_BLOCK_MC) { ··· 5416 5408 si_init_dma_pg(rdev); 5417 5409 } 5418 5410 si_init_ao_cu_mask(rdev); 5419 - if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) { 5411 + if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) { 5420 5412 si_init_gfx_cgpg(rdev); 5421 5413 } 5422 5414 si_enable_dma_pg(rdev, true); ··· 5568 5560 { 5569 5561 u32 tmp; 5570 5562 5571 - WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5563 + tmp = RREG32(CP_INT_CNTL_RING0) & 5564 + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5565 + WREG32(CP_INT_CNTL_RING0, tmp); 5572 5566 WREG32(CP_INT_CNTL_RING1, 0); 5573 5567 WREG32(CP_INT_CNTL_RING2, 0); 5574 5568 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; ··· 5695 5685 5696 5686 int si_irq_set(struct radeon_device *rdev) 5697 5687 { 5698 - u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 5688 + u32 cp_int_cntl; 5699 5689 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 5700 5690 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5701 5691 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; ··· 5715 5705 si_disable_interrupt_state(rdev); 5716 5706 return 0; 5717 5707 } 5708 + 5709 + cp_int_cntl = RREG32(CP_INT_CNTL_RING0) & 5710 + (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 5718 5711 5719 5712 if (!ASIC_IS_NODCE(rdev)) { 5720 5713 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
-6
drivers/gpu/drm/radeon/si_dpm.c
··· 6075 6075 return ret; 6076 6076 } 6077 6077 6078 - ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO); 6079 - if (ret) { 6080 - DRM_ERROR("si_dpm_force_performance_level failed\n"); 6081 - return ret; 6082 - } 6083 - 6084 6078 si_update_cg(rdev, (RADEON_CG_BLOCK_GFX | 6085 6079 RADEON_CG_BLOCK_MC | 6086 6080 RADEON_CG_BLOCK_SDMA |
+28 -15
drivers/gpu/drm/radeon/si_smc.c
··· 29 29 #include "ppsmc.h" 30 30 #include "radeon_ucode.h" 31 31 32 - int si_set_smc_sram_address(struct radeon_device *rdev, 33 - u32 smc_address, u32 limit) 32 + static int si_set_smc_sram_address(struct radeon_device *rdev, 33 + u32 smc_address, u32 limit) 34 34 { 35 35 if (smc_address & 3) 36 36 return -EINVAL; ··· 47 47 u32 smc_start_address, 48 48 const u8 *src, u32 byte_count, u32 limit) 49 49 { 50 - int ret; 50 + unsigned long flags; 51 + int ret = 0; 51 52 u32 data, original_data, addr, extra_shift; 52 53 53 54 if (smc_start_address & 3) ··· 58 57 59 58 addr = smc_start_address; 60 59 60 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 61 61 while (byte_count >= 4) { 62 62 /* SMC address space is BE */ 63 63 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; 64 64 65 65 ret = si_set_smc_sram_address(rdev, addr, limit); 66 66 if (ret) 67 - return ret; 67 + goto done; 68 68 69 69 WREG32(SMC_IND_DATA_0, data); 70 70 ··· 80 78 81 79 ret = si_set_smc_sram_address(rdev, addr, limit); 82 80 if (ret) 83 - return ret; 81 + goto done; 84 82 85 83 original_data = RREG32(SMC_IND_DATA_0); 86 84 ··· 98 96 99 97 ret = si_set_smc_sram_address(rdev, addr, limit); 100 98 if (ret) 101 - return ret; 99 + goto done; 102 100 103 101 WREG32(SMC_IND_DATA_0, data); 104 102 } 105 - return 0; 103 + 104 + done: 105 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 106 + 107 + return ret; 106 108 } 107 109 108 110 void si_start_smc(struct radeon_device *rdev) ··· 209 203 210 204 int si_load_smc_ucode(struct radeon_device *rdev, u32 limit) 211 205 { 206 + unsigned long flags; 212 207 u32 ucode_start_address; 213 208 u32 ucode_size; 214 209 const u8 *src; ··· 248 241 return -EINVAL; 249 242 250 243 src = (const u8 *)rdev->smc_fw->data; 244 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 251 245 WREG32(SMC_IND_INDEX_0, ucode_start_address); 252 246 WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); 253 247 while (ucode_size >= 4) { ··· 261 
253 ucode_size -= 4; 262 254 } 263 255 WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); 256 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 264 257 265 258 return 0; 266 259 } ··· 269 260 int si_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 270 261 u32 *value, u32 limit) 271 262 { 263 + unsigned long flags; 272 264 int ret; 273 265 266 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 274 267 ret = si_set_smc_sram_address(rdev, smc_address, limit); 275 - if (ret) 276 - return ret; 268 + if (ret == 0) 269 + *value = RREG32(SMC_IND_DATA_0); 270 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 277 271 278 - *value = RREG32(SMC_IND_DATA_0); 279 - return 0; 272 + return ret; 280 273 } 281 274 282 275 int si_write_smc_sram_dword(struct radeon_device *rdev, u32 smc_address, 283 276 u32 value, u32 limit) 284 277 { 278 + unsigned long flags; 285 279 int ret; 286 280 281 + spin_lock_irqsave(&rdev->smc_idx_lock, flags); 287 282 ret = si_set_smc_sram_address(rdev, smc_address, limit); 288 - if (ret) 289 - return ret; 283 + if (ret == 0) 284 + WREG32(SMC_IND_DATA_0, value); 285 + spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); 290 286 291 - WREG32(SMC_IND_DATA_0, value); 292 - return 0; 287 + return ret; 293 288 }
-2
drivers/gpu/drm/radeon/sumo_dpm.c
··· 1319 1319 if (pi->enable_dpm) 1320 1320 sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1321 1321 1322 - rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1323 - 1324 1322 return 0; 1325 1323 } 1326 1324
+16 -1
drivers/gpu/drm/radeon/trinity_dpm.c
··· 1068 1068 pi->requested_rps.ps_priv = &pi->requested_ps; 1069 1069 } 1070 1070 1071 + void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable) 1072 + { 1073 + struct trinity_power_info *pi = trinity_get_pi(rdev); 1074 + 1075 + if (pi->enable_bapm) { 1076 + trinity_acquire_mutex(rdev); 1077 + trinity_dpm_bapm_enable(rdev, enable); 1078 + trinity_release_mutex(rdev); 1079 + } 1080 + } 1081 + 1071 1082 int trinity_dpm_enable(struct radeon_device *rdev) 1072 1083 { 1073 1084 struct trinity_power_info *pi = trinity_get_pi(rdev); ··· 1102 1091 trinity_program_sclk_dpm(rdev); 1103 1092 trinity_start_dpm(rdev); 1104 1093 trinity_wait_for_dpm_enabled(rdev); 1094 + trinity_dpm_bapm_enable(rdev, false); 1105 1095 trinity_release_mutex(rdev); 1106 1096 1107 1097 if (rdev->irq.installed && ··· 1128 1116 trinity_release_mutex(rdev); 1129 1117 return; 1130 1118 } 1119 + trinity_dpm_bapm_enable(rdev, false); 1131 1120 trinity_disable_clock_power_gating(rdev); 1132 1121 sumo_clear_vc(rdev); 1133 1122 trinity_wait_for_level_0(rdev); ··· 1225 1212 1226 1213 trinity_acquire_mutex(rdev); 1227 1214 if (pi->enable_dpm) { 1215 + if (pi->enable_bapm) 1216 + trinity_dpm_bapm_enable(rdev, rdev->pm.dpm.ac_power); 1228 1217 trinity_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps); 1229 1218 trinity_enable_power_level_0(rdev); 1230 1219 trinity_force_level_0(rdev); ··· 1236 1221 trinity_force_level_0(rdev); 1237 1222 trinity_unforce_levels(rdev); 1238 1223 trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); 1239 - rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1240 1224 } 1241 1225 trinity_release_mutex(rdev); 1242 1226 ··· 1868 1854 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1869 1855 pi->at[i] = TRINITY_AT_DFLT; 1870 1856 1857 + pi->enable_bapm = true; 1871 1858 pi->enable_nbps_policy = true; 1872 1859 pi->enable_sclk_ds = true; 1873 1860 pi->enable_gfx_power_gating = true;
+2
drivers/gpu/drm/radeon/trinity_dpm.h
··· 108 108 bool enable_auto_thermal_throttling; 109 109 bool enable_dpm; 110 110 bool enable_sclk_ds; 111 + bool enable_bapm; 111 112 bool uvd_dpm; 112 113 struct radeon_ps current_rps; 113 114 struct trinity_ps current_ps; ··· 119 118 #define TRINITY_AT_DFLT 30 120 119 121 120 /* trinity_smc.c */ 121 + int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable); 122 122 int trinity_dpm_config(struct radeon_device *rdev, bool enable); 123 123 int trinity_uvd_dpm_config(struct radeon_device *rdev); 124 124 int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
+8
drivers/gpu/drm/radeon/trinity_smc.c
··· 56 56 return 0; 57 57 } 58 58 59 + int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable) 60 + { 61 + if (enable) 62 + return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM); 63 + else 64 + return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM); 65 + } 66 + 59 67 int trinity_dpm_config(struct radeon_device *rdev, bool enable) 60 68 { 61 69 if (enable)
+1 -1
drivers/gpu/drm/ttm/ttm_object.c
··· 218 218 uint32_t key) 219 219 { 220 220 struct ttm_object_device *tdev = tfile->tdev; 221 - struct ttm_base_object *base; 221 + struct ttm_base_object *uninitialized_var(base); 222 222 struct drm_hash_item *hash; 223 223 int ret; 224 224
+1 -1
drivers/gpu/drm/ttm/ttm_tt.c
··· 170 170 ttm_tt_unbind(ttm); 171 171 } 172 172 173 - if (likely(ttm->pages != NULL)) { 173 + if (ttm->state == tt_unbound) { 174 174 ttm->bdev->driver->ttm_tt_unpopulate(ttm); 175 175 } 176 176
-1
drivers/gpu/drm/udl/udl_gem.c
··· 97 97 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); 98 98 switch (ret) { 99 99 case -EAGAIN: 100 - set_need_resched(); 101 100 case 0: 102 101 case -ERESTARTSYS: 103 102 return VM_FAULT_NOPAGE;
+65 -9
drivers/hid/hid-core.c
··· 94 94 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) 95 95 { 96 96 struct hid_field *field; 97 - int i; 98 97 99 98 if (report->maxfield == HID_MAX_FIELDS) { 100 99 hid_err(report->device, "too many fields in report\n"); ··· 111 112 field->usage = (struct hid_usage *)(field + 1); 112 113 field->value = (s32 *)(field->usage + usages); 113 114 field->report = report; 114 - 115 - for (i = 0; i < usages; i++) 116 - field->usage[i].usage_index = i; 117 115 118 116 return field; 119 117 } ··· 222 226 { 223 227 struct hid_report *report; 224 228 struct hid_field *field; 225 - int usages; 229 + unsigned usages; 226 230 unsigned offset; 227 - int i; 231 + unsigned i; 228 232 229 233 report = hid_register_report(parser->device, report_type, parser->global.report_id); 230 234 if (!report) { ··· 251 255 if (!parser->local.usage_index) /* Ignore padding fields */ 252 256 return 0; 253 257 254 - usages = max_t(int, parser->local.usage_index, parser->global.report_count); 258 + usages = max_t(unsigned, parser->local.usage_index, 259 + parser->global.report_count); 255 260 256 261 field = hid_register_field(report, usages, parser->global.report_count); 257 262 if (!field) ··· 263 266 field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 264 267 265 268 for (i = 0; i < usages; i++) { 266 - int j = i; 269 + unsigned j = i; 267 270 /* Duplicate the last usage we parsed if we have excess values */ 268 271 if (i >= parser->local.usage_index) 269 272 j = parser->local.usage_index - 1; 270 273 field->usage[i].hid = parser->local.usage[j]; 271 274 field->usage[i].collection_index = 272 275 parser->local.collection_index[j]; 276 + field->usage[i].usage_index = i; 273 277 } 274 278 275 279 field->maxusage = usages; ··· 799 801 } 800 802 EXPORT_SYMBOL_GPL(hid_parse_report); 801 803 804 + static const char * const hid_report_names[] = { 805 + "HID_INPUT_REPORT", 806 + "HID_OUTPUT_REPORT", 807 + "HID_FEATURE_REPORT", 808 + }; 809 + /** 810 + * hid_validate_values - validate existing device report's value indexes 811 + * 812 + * @device: hid device 813 + * @type: which report type to examine 814 + * @id: which report ID to examine (0 for first) 815 + * @field_index: which report field to examine 816 + * @report_counts: expected number of values 817 + * 818 + * Validate the number of values in a given field of a given report, after 819 + * parsing. 820 + */ 821 + struct hid_report *hid_validate_values(struct hid_device *hid, 822 + unsigned int type, unsigned int id, 823 + unsigned int field_index, 824 + unsigned int report_counts) 825 + { 826 + struct hid_report *report; 827 + 828 + if (type > HID_FEATURE_REPORT) { 829 + hid_err(hid, "invalid HID report type %u\n", type); 830 + return NULL; 831 + } 832 + 833 + if (id >= HID_MAX_IDS) { 834 + hid_err(hid, "invalid HID report id %u\n", id); 835 + return NULL; 836 + } 837 + 838 + /* 839 + * Explicitly not using hid_get_report() here since it depends on 840 + * ->numbered being checked, which may not always be the case when 841 + * drivers go to access report values. 842 + */ 843 + report = hid->report_enum[type].report_id_hash[id]; 844 + if (!report) { 845 + hid_err(hid, "missing %s %u\n", hid_report_names[type], id); 846 + return NULL; 847 + } 848 + if (report->maxfield <= field_index) { 849 + hid_err(hid, "not enough fields in %s %u\n", 850 + hid_report_names[type], id); 851 + return NULL; 852 + } 853 + if (report->field[field_index]->report_count < report_counts) { 854 + hid_err(hid, "not enough values in %s %u field %u\n", 855 + hid_report_names[type], id, field_index); 856 + return NULL; 857 + } 858 + return report; 859 + } 860 + EXPORT_SYMBOL_GPL(hid_validate_values); 861 + 802 862 /** 803 863 * hid_open_report - open a driver-specific device report 804 864 * ··· 1352 1296 goto out; 1353 1297 } 1354 1298 1355 - if (hid->claimed != HID_CLAIMED_HIDRAW) { 1299 + if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 1356 1300 for (a = 0; a < report->maxfield; a++) 1357 1301 hid_input_field(hid, report->field[a], cdata, interrupt); 1358 1302 hdrv = hid->driver;
+10 -1
drivers/hid/hid-input.c
··· 485 485 if (field->flags & HID_MAIN_ITEM_CONSTANT) 486 486 goto ignore; 487 487 488 + /* Ignore if report count is out of bounds. */ 489 + if (field->report_count < 1) 490 + goto ignore; 491 + 488 492 /* only LED usages are supported in output fields */ 489 493 if (field->report_type == HID_OUTPUT_REPORT && 490 494 (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) { ··· 1240 1236 1241 1237 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1242 1238 list_for_each_entry(rep, &rep_enum->report_list, list) 1243 - for (i = 0; i < rep->maxfield; i++) 1239 + for (i = 0; i < rep->maxfield; i++) { 1240 + /* Ignore if report count is out of bounds. */ 1241 + if (rep->field[i]->report_count < 1) 1242 + continue; 1243 + 1244 1244 for (j = 0; j < rep->field[i]->maxusage; j++) { 1245 1245 /* Verify if Battery Strength feature is available */ 1246 1246 hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]); ··· 1253 1245 drv->feature_mapping(hid, rep->field[i], 1254 1246 rep->field[i]->usage + j); 1255 1247 } 1248 + } 1256 1249 } 1257 1250 1258 1251 static struct hid_input *hidinput_allocate(struct hid_device *hid)
+19 -6
drivers/hid/hid-lenovo-tpkbd.c
··· 339 339 struct tpkbd_data_pointer *data_pointer; 340 340 size_t name_sz = strlen(dev_name(dev)) + 16; 341 341 char *name_mute, *name_micmute; 342 - int ret; 342 + int i, ret; 343 + 344 + /* Validate required reports. */ 345 + for (i = 0; i < 4; i++) { 346 + if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1)) 347 + return -ENODEV; 348 + } 349 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2)) 350 + return -ENODEV; 343 351 344 352 if (sysfs_create_group(&hdev->dev.kobj, 345 353 &tpkbd_attr_group_pointer)) { ··· 414 406 ret = hid_parse(hdev); 415 407 if (ret) { 416 408 hid_err(hdev, "hid_parse failed\n"); 417 - goto err_free; 409 + goto err; 418 410 } 419 411 420 412 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 421 413 if (ret) { 422 414 hid_err(hdev, "hid_hw_start failed\n"); 423 - goto err_free; 415 + goto err; 424 416 } 425 417 426 418 uhdev = (struct usbhid_device *) hdev->driver_data; 427 419 428 - if (uhdev->ifnum == 1) 429 - return tpkbd_probe_tp(hdev); 420 + if (uhdev->ifnum == 1) { 421 + ret = tpkbd_probe_tp(hdev); 422 + if (ret) 423 + goto err_hid; 424 + } 430 425 431 426 return 0; 432 - err_free: 427 + err_hid: 428 + hid_hw_stop(hdev); 429 + err: 433 430 return ret; 434 431 } 435 432
+3 -16
drivers/hid/hid-lg2ff.c
··· 64 64 struct hid_report *report; 65 65 struct hid_input *hidinput = list_entry(hid->inputs.next, 66 66 struct hid_input, list); 67 - struct list_head *report_list = 68 - &hid->report_enum[HID_OUTPUT_REPORT].report_list; 69 67 struct input_dev *dev = hidinput->input; 70 68 int error; 71 69 72 - if (list_empty(report_list)) { 73 - hid_err(hid, "no output report found\n"); 70 + /* Check that the report looks ok */ 71 + report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7); 72 + if (!report) 74 73 return -ENODEV; 75 - } 76 - 77 - report = list_entry(report_list->next, struct hid_report, list); 78 - 79 - if (report->maxfield < 1) { 80 - hid_err(hid, "output report is empty\n"); 81 - return -ENODEV; 82 - } 83 - if (report->field[0]->report_count < 7) { 84 - hid_err(hid, "not enough values in the field\n"); 85 - return -ENODEV; 86 - } 87 74 88 75 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL); 89 76 if (!lg2ff)
+6 -23
drivers/hid/hid-lg3ff.c
··· 66 66 int x, y; 67 67 68 68 /* 69 - * Maxusage should always be 63 (maximum fields) 70 - * likely a better way to ensure this data is clean 69 + * Available values in the field should always be 63, but we only use up to 70 + * 35. Instead, clear the entire area, however big it is. 71 71 */ 72 - memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage); 72 + memset(report->field[0]->value, 0, 73 + sizeof(__s32) * report->field[0]->report_count); 73 74 74 75 switch (effect->type) { 75 76 case FF_CONSTANT: ··· 130 129 int lg3ff_init(struct hid_device *hid) 131 130 { 132 131 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 133 - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; 134 132 struct input_dev *dev = hidinput->input; 135 - struct hid_report *report; 136 - struct hid_field *field; 137 133 const signed short *ff_bits = ff3_joystick_ac; 138 134 int error; 139 135 int i; 140 136 141 - /* Find the report to use */ 142 - if (list_empty(report_list)) { 143 - hid_err(hid, "No output report found\n"); 144 - return -1; 145 - } 146 - 147 137 /* Check that the report looks ok */ 148 - report = list_entry(report_list->next, struct hid_report, list); 149 - if (!report) { 150 - hid_err(hid, "NULL output report\n"); 151 - return -1; 152 - } 153 - 154 - field = report->field[0]; 155 - if (!field) { 156 - hid_err(hid, "NULL field\n"); 157 - return -1; 158 - } 138 + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) 139 + return -ENODEV; 159 140 160 141 /* Assume single fixed device G940 */ 161 142 for (i = 0; ff_bits[i] >= 0; i++)
+1 -19
drivers/hid/hid-lg4ff.c
··· 484 484 int lg4ff_init(struct hid_device *hid) 485 485 { 486 486 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 487 - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; 488 487 struct input_dev *dev = hidinput->input; 489 - struct hid_report *report; 490 - struct hid_field *field; 491 488 struct lg4ff_device_entry *entry; 492 489 struct lg_drv_data *drv_data; 493 490 struct usb_device_descriptor *udesc; 494 491 int error, i, j; 495 492 __u16 bcdDevice, rev_maj, rev_min; 496 493 497 - /* Find the report to use */ 498 - if (list_empty(report_list)) { 499 - hid_err(hid, "No output report found\n"); 500 - return -1; 501 - } 502 - 503 494 /* Check that the report looks ok */ 504 - report = list_entry(report_list->next, struct hid_report, list); 505 - if (!report) { 506 - hid_err(hid, "NULL output report\n"); 495 + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) 507 496 return -1; 508 - } 509 - 510 - field = report->field[0]; 511 - if (!field) { 512 - hid_err(hid, "NULL field\n"); 513 - return -1; 514 - } 515 497 516 498 /* Check what wheel has been connected */ 517 499 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+2 -15
drivers/hid/hid-lgff.c
··· 128 128 int lgff_init(struct hid_device* hid) 129 129 { 130 130 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); 131 - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; 132 131 struct input_dev *dev = hidinput->input; 133 - struct hid_report *report; 134 - struct hid_field *field; 135 132 const signed short *ff_bits = ff_joystick; 136 133 int error; 137 134 int i; 138 135 139 - /* Find the report to use */ 140 - if (list_empty(report_list)) { 141 - hid_err(hid, "No output report found\n"); 142 - return -1; 143 - } 144 - 145 136 /* Check that the report looks ok */ 146 - report = list_entry(report_list->next, struct hid_report, list); 147 - field = report->field[0]; 148 - if (!field) { 149 - hid_err(hid, "NULL field\n"); 150 - return -1; 151 - } 137 + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) 138 + return -ENODEV; 152 139 153 140 for (i = 0; i < ARRAY_SIZE(devices); i++) { 154 141 if (dev->id.vendor == devices[i].idVendor &&
+8 -2
drivers/hid/hid-logitech-dj.c
··· 461 461 struct hid_report *report; 462 462 struct hid_report_enum *output_report_enum; 463 463 u8 *data = (u8 *)(&dj_report->device_index); 464 - int i; 464 + unsigned int i; 465 465 466 466 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT]; 467 467 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT]; ··· 471 471 return -ENODEV; 472 472 } 473 473 474 - for (i = 0; i < report->field[0]->report_count; i++) 474 + for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++) 475 475 report->field[0]->value[i] = data[i]; 476 476 477 477 hid_hw_request(hdev, report, HID_REQ_SET_REPORT); ··· 788 788 if (retval) { 789 789 dev_err(&hdev->dev, 790 790 "%s:parse of interface 2 failed\n", __func__); 791 + goto hid_parse_fail; 792 + } 793 + 794 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT, 795 + 0, DJREPORT_SHORT_LENGTH - 1)) { 796 + retval = -ENODEV; 791 797 goto hid_parse_fail; 792 798 } 793 799
+14 -12
drivers/hid/hid-multitouch.c
··· 101 101 unsigned last_slot_field; /* the last field of a slot */ 102 102 unsigned mt_report_id; /* the report ID of the multitouch device */ 103 103 unsigned pen_report_id; /* the report ID of the pen device */ 104 - __s8 inputmode; /* InputMode HID feature, -1 if non-existent */ 105 - __s8 inputmode_index; /* InputMode HID feature index in the report */ 106 - __s8 maxcontact_report_id; /* Maximum Contact Number HID feature, 104 + __s16 inputmode; /* InputMode HID feature, -1 if non-existent */ 105 + __s16 inputmode_index; /* InputMode HID feature index in the report */ 106 + __s16 maxcontact_report_id; /* Maximum Contact Number HID feature, 107 107 -1 if non-existent */ 108 108 __u8 num_received; /* how many contacts we received */ 109 109 __u8 num_expected; /* expected last contact index */ ··· 312 312 struct hid_field *field, struct hid_usage *usage) 313 313 { 314 314 struct mt_device *td = hid_get_drvdata(hdev); 315 - int i; 316 315 317 316 switch (usage->hid) { 318 317 case HID_DG_INPUTMODE: 319 - td->inputmode = field->report->id; 320 - td->inputmode_index = 0; /* has to be updated below */ 321 - 322 - for (i=0; i < field->maxusage; i++) { 323 - if (field->usage[i].hid == usage->hid) { 324 - td->inputmode_index = i; 325 - break; 326 - } 318 + /* Ignore if value index is out of bounds. */ 319 + if (usage->usage_index >= field->report_count) { 320 + dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n"); 321 + break; 327 322 } 323 + 324 + td->inputmode = field->report->id; 325 + td->inputmode_index = usage->usage_index; 328 326 329 327 break; 330 328 case HID_DG_CONTACTMAX: ··· 509 511 mt_store_field(usage, td, hi); 510 512 return 1; 511 513 case HID_DG_CONTACTCOUNT: 514 + /* Ignore if indexes are out of bounds. */ 515 + if (field->index >= field->report->maxfield || 516 + usage->usage_index >= field->report_count) 517 + return 1; 512 518 td->cc_index = field->index; 513 519 td->cc_value_index = usage->usage_index; 514 520 return 1;
+4
drivers/hid/hid-sony.c
··· 537 537 drv_data = hid_get_drvdata(hdev); 538 538 BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER)); 539 539 540 + /* Validate expected report characteristics. */ 541 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7)) 542 + return -ENODEV; 543 + 540 544 buzz = kzalloc(sizeof(*buzz), GFP_KERNEL); 541 545 if (!buzz) { 542 546 hid_err(hdev, "Insufficient memory, cannot allocate driver data\n");
+5
drivers/hid/hid-steelseries.c
··· 249 249 goto err_free; 250 250 } 251 251 252 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) { 253 + ret = -ENODEV; 254 + goto err_free; 255 + } 256 + 252 257 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 253 258 if (ret) { 254 259 hid_err(hdev, "hw start failed\n");
+5 -13
drivers/hid/hid-zpff.c
··· 68 68 struct hid_report *report; 69 69 struct hid_input *hidinput = list_entry(hid->inputs.next, 70 70 struct hid_input, list); 71 - struct list_head *report_list = 72 - &hid->report_enum[HID_OUTPUT_REPORT].report_list; 73 71 struct input_dev *dev = hidinput->input; 74 - int error; 72 + int i, error; 75 73 76 - if (list_empty(report_list)) { 77 - hid_err(hid, "no output report found\n"); 78 - return -ENODEV; 79 - } 80 - 81 - report = list_entry(report_list->next, struct hid_report, list); 82 - 83 - if (report->maxfield < 4) { 84 - hid_err(hid, "not enough fields in report\n"); 85 - return -ENODEV; 74 + for (i = 0; i < 4; i++) { 75 + report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1); 76 + if (!report) 77 + return -ENODEV; 86 78 } 87 79 88 80 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
+2 -2
drivers/iio/accel/bma180.c
··· 617 617 #ifdef CONFIG_PM_SLEEP 618 618 static int bma180_suspend(struct device *dev) 619 619 { 620 - struct iio_dev *indio_dev = dev_to_iio_dev(dev); 620 + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 621 621 struct bma180_data *data = iio_priv(indio_dev); 622 622 int ret; 623 623 ··· 630 630 631 631 static int bma180_resume(struct device *dev) 632 632 { 633 - struct iio_dev *indio_dev = dev_to_iio_dev(dev); 633 + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 634 634 struct bma180_data *data = iio_priv(indio_dev); 635 635 int ret; 636 636
+6 -5
drivers/iio/adc/at91_adc.c
··· 550 550 551 551 static int at91_adc_probe(struct platform_device *pdev) 552 552 { 553 - unsigned int prsc, mstrclk, ticks, adc_clk, shtim; 553 + unsigned int prsc, mstrclk, ticks, adc_clk, adc_clk_khz, shtim; 554 554 int ret; 555 555 struct iio_dev *idev; 556 556 struct at91_adc_state *st; ··· 643 643 */ 644 644 mstrclk = clk_get_rate(st->clk); 645 645 adc_clk = clk_get_rate(st->adc_clk); 646 + adc_clk_khz = adc_clk / 1000; 646 647 prsc = (mstrclk / (2 * adc_clk)) - 1; 647 648 648 649 if (!st->startup_time) { ··· 657 656 * defined in the electrical characteristics of the board, divided by 8. 658 657 * The formula thus is : Startup Time = (ticks + 1) * 8 / ADC Clock 659 658 */ 660 - ticks = round_up((st->startup_time * adc_clk / 661 - 1000000) - 1, 8) / 8; 659 + ticks = round_up((st->startup_time * adc_clk_khz / 660 + 1000) - 1, 8) / 8; 662 661 /* 663 662 * a minimal Sample and Hold Time is necessary for the ADC to guarantee 664 663 * the best converted final value between two channels selection 665 664 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock 666 665 */ 667 - shtim = round_up((st->sample_hold_time * adc_clk / 668 - 1000000) - 1, 1); 666 + shtim = round_up((st->sample_hold_time * adc_clk_khz / 667 + 1000) - 1, 1); 669 668 670 669 reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask; 671 670 reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
+2
drivers/iio/buffer_cb.c
··· 41 41 goto error_ret; 42 42 } 43 43 44 + iio_buffer_init(&cb_buff->buffer); 45 + 44 46 cb_buff->private = private; 45 47 cb_buff->cb = cb; 46 48 cb_buff->buffer.access = &iio_cb_access;
+6 -6
drivers/iio/dac/mcp4725.c
··· 37 37 38 38 static int mcp4725_suspend(struct device *dev) 39 39 { 40 - struct iio_dev *indio_dev = dev_to_iio_dev(dev); 41 - struct mcp4725_data *data = iio_priv(indio_dev); 40 + struct mcp4725_data *data = iio_priv(i2c_get_clientdata( 41 + to_i2c_client(dev))); 42 42 u8 outbuf[2]; 43 43 44 44 outbuf[0] = (data->powerdown_mode + 1) << 4; 45 45 outbuf[1] = 0; 46 46 data->powerdown = true; 47 47 48 - return i2c_master_send(to_i2c_client(dev), outbuf, 2); 48 + return i2c_master_send(data->client, outbuf, 2); 49 49 } 50 50 51 51 static int mcp4725_resume(struct device *dev) 52 52 { 53 - struct iio_dev *indio_dev = dev_to_iio_dev(dev); 54 - struct mcp4725_data *data = iio_priv(indio_dev); 53 + struct mcp4725_data *data = iio_priv(i2c_get_clientdata( 54 + to_i2c_client(dev))); 55 55 u8 outbuf[2]; 56 56 57 57 /* restore previous DAC value */ ··· 59 59 outbuf[1] = data->dac_value & 0xff; 60 60 data->powerdown = false; 61 61 62 - return i2c_master_send(to_i2c_client(dev), outbuf, 2); 62 + return i2c_master_send(data->client, outbuf, 2); 63 63 } 64 64 65 65 #ifdef CONFIG_PM_SLEEP
+4
drivers/iio/iio_core.h
··· 49 49 #define iio_buffer_poll_addr (&iio_buffer_poll) 50 50 #define iio_buffer_read_first_n_outer_addr (&iio_buffer_read_first_n_outer) 51 51 52 + void iio_disable_all_buffers(struct iio_dev *indio_dev); 53 + 52 54 #else 53 55 54 56 #define iio_buffer_poll_addr NULL 55 57 #define iio_buffer_read_first_n_outer_addr NULL 58 + 59 + static inline void iio_disable_all_buffers(struct iio_dev *indio_dev) {} 56 60 57 61 #endif 58 62
+28 -2
drivers/iio/industrialio-buffer.c
··· 454 454 return bytes; 455 455 } 456 456 457 + void iio_disable_all_buffers(struct iio_dev *indio_dev) 458 + { 459 + struct iio_buffer *buffer, *_buffer; 460 + 461 + if (list_empty(&indio_dev->buffer_list)) 462 + return; 463 + 464 + if (indio_dev->setup_ops->predisable) 465 + indio_dev->setup_ops->predisable(indio_dev); 466 + 467 + list_for_each_entry_safe(buffer, _buffer, 468 + &indio_dev->buffer_list, buffer_list) 469 + list_del_init(&buffer->buffer_list); 470 + 471 + indio_dev->currentmode = INDIO_DIRECT_MODE; 472 + if (indio_dev->setup_ops->postdisable) 473 + indio_dev->setup_ops->postdisable(indio_dev); 474 + } 475 + 457 476 int iio_update_buffers(struct iio_dev *indio_dev, 458 477 struct iio_buffer *insert_buffer, 459 478 struct iio_buffer *remove_buffer) ··· 541 522 * Note can only occur when adding a buffer. 542 523 */ 543 524 list_del_init(&insert_buffer->buffer_list); 544 - indio_dev->active_scan_mask = old_mask; 545 - success = -EINVAL; 525 + if (old_mask) { 526 + indio_dev->active_scan_mask = old_mask; 527 + success = -EINVAL; 528 + } 529 + else { 530 + kfree(compound_mask); 531 + ret = -EINVAL; 532 + goto error_ret; 533 + } 546 534 } 547 535 } else { 548 536 indio_dev->active_scan_mask = compound_mask;
+21 -10
drivers/iio/industrialio-core.c
··· 890 890 static void iio_dev_release(struct device *device) 891 891 { 892 892 struct iio_dev *indio_dev = dev_to_iio_dev(device); 893 - if (indio_dev->chrdev.dev) 894 - cdev_del(&indio_dev->chrdev); 895 893 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 896 894 iio_device_unregister_trigger_consumer(indio_dev); 897 895 iio_device_unregister_eventset(indio_dev); ··· 1010 1012 if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags)) 1011 1013 return -EBUSY; 1012 1014 1015 + iio_device_get(indio_dev); 1016 + 1013 1017 filp->private_data = indio_dev; 1014 1018 1015 1019 return 0; ··· 1025 1025 struct iio_dev *indio_dev = container_of(inode->i_cdev, 1026 1026 struct iio_dev, chrdev); 1027 1027 clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags); 1028 + iio_device_put(indio_dev); 1029 + 1028 1030 return 0; 1029 1031 } ··· 1096 1094 indio_dev->setup_ops == NULL) 1097 1095 indio_dev->setup_ops = &noop_ring_setup_ops; 1098 1096 1099 - ret = device_add(&indio_dev->dev); 1100 - if (ret < 0) 1101 - goto error_unreg_eventset; 1102 1097 cdev_init(&indio_dev->chrdev, &iio_buffer_fileops); 1103 1098 indio_dev->chrdev.owner = indio_dev->info->driver_module; 1099 + indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj; 1104 1100 ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1); 1105 1101 if (ret < 0) 1106 - goto error_del_device; 1107 - return 0; 1102 + goto error_unreg_eventset; 1108 1103 1109 - error_del_device: 1110 - device_del(&indio_dev->dev); 1104 + ret = device_add(&indio_dev->dev); 1105 + if (ret < 0) 1106 + goto error_cdev_del; 1107 + 1108 + return 0; 1109 + error_cdev_del: 1110 + cdev_del(&indio_dev->chrdev); 1111 1111 error_unreg_eventset: 1112 1112 iio_device_unregister_eventset(indio_dev); 1113 1113 error_free_sysfs: ··· 1124 1120 void iio_device_unregister(struct iio_dev *indio_dev) 1125 1121 { 1126 1122 mutex_lock(&indio_dev->info_exist_lock); 1123 + 1124 + device_del(&indio_dev->dev); 1125 + 1126 + if (indio_dev->chrdev.dev) 1127 + cdev_del(&indio_dev->chrdev); 1128 + 1129 + iio_disable_all_buffers(indio_dev); 1130 + 1127 1131 indio_dev->info = NULL; 1128 1132 mutex_unlock(&indio_dev->info_exist_lock); 1129 - device_del(&indio_dev->dev); 1130 1133 } 1131 1134 EXPORT_SYMBOL(iio_device_unregister); 1132 1135 subsys_initcall(iio_init);
+14 -6
drivers/iio/industrialio-event.c
··· 72 72 static unsigned int iio_event_poll(struct file *filep, 73 73 struct poll_table_struct *wait) 74 74 { 75 - struct iio_event_interface *ev_int = filep->private_data; 75 + struct iio_dev *indio_dev = filep->private_data; 76 + struct iio_event_interface *ev_int = indio_dev->event_interface; 76 77 unsigned int events = 0; 77 78 78 79 poll_wait(filep, &ev_int->wait, wait); ··· 91 90 size_t count, 92 91 loff_t *f_ps) 93 92 { 94 - struct iio_event_interface *ev_int = filep->private_data; 93 + struct iio_dev *indio_dev = filep->private_data; 94 + struct iio_event_interface *ev_int = indio_dev->event_interface; 95 95 unsigned int copied; 96 96 int ret; 97 97 ··· 123 121 124 122 static int iio_event_chrdev_release(struct inode *inode, struct file *filep) 125 123 { 126 - struct iio_event_interface *ev_int = filep->private_data; 124 + struct iio_dev *indio_dev = filep->private_data; 125 + struct iio_event_interface *ev_int = indio_dev->event_interface; 127 126 128 127 spin_lock_irq(&ev_int->wait.lock); 129 128 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); ··· 135 132 */ 136 133 kfifo_reset_out(&ev_int->det_events); 137 134 spin_unlock_irq(&ev_int->wait.lock); 135 + 136 + iio_device_put(indio_dev); 138 137 139 138 return 0; 140 139 } ··· 163 158 return -EBUSY; 164 159 } 165 160 spin_unlock_irq(&ev_int->wait.lock); 166 - fd = anon_inode_getfd("iio:event", 167 - &iio_event_chrdev_fileops, ev_int, O_RDONLY | O_CLOEXEC); 161 + iio_device_get(indio_dev); 162 + 163 + fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops, 164 + indio_dev, O_RDONLY | O_CLOEXEC); 168 165 if (fd < 0) { 169 166 spin_lock_irq(&ev_int->wait.lock); 170 167 __clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); 171 168 spin_unlock_irq(&ev_int->wait.lock); 169 + iio_device_put(indio_dev); 172 170 } 173 171 return fd; 174 172 } ··· 284 276 goto error_ret; 285 277 } 286 278 if (chan->modified) 287 279 mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2, 288 280 i/IIO_EV_DIR_MAX, 289 281 i%IIO_EV_DIR_MAX); 290 282 else if (chan->differential)
+4 -2
drivers/iio/temperature/tmp006.c
··· 252 252 #ifdef CONFIG_PM_SLEEP 253 253 static int tmp006_suspend(struct device *dev) 254 254 { 255 - return tmp006_powerdown(iio_priv(dev_to_iio_dev(dev))); 255 + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 256 + return tmp006_powerdown(iio_priv(indio_dev)); 256 257 } 257 258 258 259 static int tmp006_resume(struct device *dev) 259 260 { 260 - struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev)); 261 + struct tmp006_data *data = iio_priv(i2c_get_clientdata( 262 + to_i2c_client(dev))); 261 263 return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG, 262 264 data->config | TMP006_CONFIG_MOD_MASK); 263 265 }
+2 -2
drivers/isdn/hardware/mISDN/hfcpci.c
··· 2295 2295 static void 2296 2296 hfcpci_softirq(void *arg) 2297 2297 { 2298 - (void) driver_for_each_device(&hfc_driver.driver, NULL, arg, 2299 - _hfcpci_softirq); 2298 + WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg, 2299 + _hfcpci_softirq) != 0); 2300 2300 2301 2301 /* if next event would be in the past ... */ 2302 2302 if ((s32)(hfc_jiffies + tics - jiffies) <= 0)
+2 -2
drivers/isdn/hisax/amd7930_fn.c
··· 314 314 315 315 t += sprintf(t, "Amd7930: empty_Dfifo cnt: %d |", cs->rcvidx); 316 316 QuickHex(t, cs->rcvbuf, cs->rcvidx); 317 - debugl1(cs, cs->dlog); 317 + debugl1(cs, "%s", cs->dlog); 318 318 } 319 319 /* moves received data in sk-buffer */ 320 320 memcpy(skb_put(skb, cs->rcvidx), cs->rcvbuf, cs->rcvidx); ··· 406 406 407 407 t += sprintf(t, "Amd7930: fill_Dfifo cnt: %d |", count); 408 408 QuickHex(t, deb_ptr, count); 409 - debugl1(cs, cs->dlog); 409 + debugl1(cs, "%s", cs->dlog); 410 410 } 411 411 /* AMD interrupts on */ 412 412 AmdIrqOn(cs);
+2 -2
drivers/isdn/hisax/avm_pci.c
··· 285 285 t += sprintf(t, "hdlc_empty_fifo %c cnt %d", 286 286 bcs->channel ? 'B' : 'A', count); 287 287 QuickHex(t, p, count); 288 - debugl1(cs, bcs->blog); 288 + debugl1(cs, "%s", bcs->blog); 289 289 } 290 290 } 291 291 ··· 345 345 t += sprintf(t, "hdlc_fill_fifo %c cnt %d", 346 346 bcs->channel ? 'B' : 'A', count); 347 347 QuickHex(t, p, count); 348 - debugl1(cs, bcs->blog); 348 + debugl1(cs, "%s", bcs->blog); 349 349 } 350 350 } 351 351
+1 -1
drivers/isdn/hisax/config.c
··· 1896 1896 ptr--; 1897 1897 *ptr++ = '\n'; 1898 1898 *ptr = 0; 1899 - HiSax_putstatus(cs, NULL, cs->dlog); 1899 + HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1900 1900 } else 1901 1901 HiSax_putstatus(cs, "LogEcho: ", 1902 1902 "warning Frame too big (%d)",
+2 -2
drivers/isdn/hisax/diva.c
··· 427 427 t += sprintf(t, "hscx_empty_fifo %c cnt %d", 428 428 bcs->hw.hscx.hscx ? 'B' : 'A', count); 429 429 QuickHex(t, ptr, count); 430 - debugl1(cs, bcs->blog); 430 + debugl1(cs, "%s", bcs->blog); 431 431 } 432 432 } 433 433 ··· 469 469 t += sprintf(t, "hscx_fill_fifo %c cnt %d", 470 470 bcs->hw.hscx.hscx ? 'B' : 'A', count); 471 471 QuickHex(t, ptr, count); 472 - debugl1(cs, bcs->blog); 472 + debugl1(cs, "%s", bcs->blog); 473 473 } 474 474 } 475 475
+1 -1
drivers/isdn/hisax/elsa.c
··· 535 535 t = tmp; 536 536 t += sprintf(tmp, "Arcofi data"); 537 537 QuickHex(t, p, cs->dc.isac.mon_rxp); 538 - debugl1(cs, tmp); 538 + debugl1(cs, "%s", tmp); 539 539 if ((cs->dc.isac.mon_rxp == 2) && (cs->dc.isac.mon_rx[0] == 0xa0)) { 540 540 switch (cs->dc.isac.mon_rx[1]) { 541 541 case 0x80:
+1 -1
drivers/isdn/hisax/elsa_ser.c
··· 344 344 345 345 t += sprintf(t, "modem read cnt %d", cs->hw.elsa.rcvcnt); 346 346 QuickHex(t, cs->hw.elsa.rcvbuf, cs->hw.elsa.rcvcnt); 347 - debugl1(cs, tmp); 347 + debugl1(cs, "%s", tmp); 348 348 } 349 349 cs->hw.elsa.rcvcnt = 0; 350 350 }
+1 -1
drivers/isdn/hisax/hfc_pci.c
··· 901 901 ptr--; 902 902 *ptr++ = '\n'; 903 903 *ptr = 0; 904 - HiSax_putstatus(cs, NULL, cs->dlog); 904 + HiSax_putstatus(cs, NULL, "%s", cs->dlog); 905 905 } else 906 906 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); 907 907 }
+1 -1
drivers/isdn/hisax/hfc_sx.c
··· 674 674 ptr--; 675 675 *ptr++ = '\n'; 676 676 *ptr = 0; 677 - HiSax_putstatus(cs, NULL, cs->dlog); 677 + HiSax_putstatus(cs, NULL, "%s", cs->dlog); 678 678 } else 679 679 HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); 680 680 }
+2 -2
drivers/isdn/hisax/hscx_irq.c
··· 75 75 t += sprintf(t, "hscx_empty_fifo %c cnt %d", 76 76 bcs->hw.hscx.hscx ? 'B' : 'A', count); 77 77 QuickHex(t, ptr, count); 78 - debugl1(cs, bcs->blog); 78 + debugl1(cs, "%s", bcs->blog); 79 79 } 80 80 } 81 81 ··· 115 115 t += sprintf(t, "hscx_fill_fifo %c cnt %d", 116 116 bcs->hw.hscx.hscx ? 'B' : 'A', count); 117 117 QuickHex(t, ptr, count); 118 - debugl1(cs, bcs->blog); 118 + debugl1(cs, "%s", bcs->blog); 119 119 } 120 120 } 121 121
+2 -2
drivers/isdn/hisax/icc.c
··· 134 134 135 135 t += sprintf(t, "icc_empty_fifo cnt %d", count); 136 136 QuickHex(t, ptr, count); 137 - debugl1(cs, cs->dlog); 137 + debugl1(cs, "%s", cs->dlog); 138 138 } 139 139 } 140 140 ··· 176 176 177 177 t += sprintf(t, "icc_fill_fifo cnt %d", count); 178 178 QuickHex(t, ptr, count); 179 - debugl1(cs, cs->dlog); 179 + debugl1(cs, "%s", cs->dlog); 180 180 } 181 181 } 182 182
+4 -4
drivers/isdn/hisax/ipacx.c
··· 260 260 261 261 t += sprintf(t, "dch_empty_fifo() cnt %d", count); 262 262 QuickHex(t, ptr, count); 263 - debugl1(cs, cs->dlog); 263 + debugl1(cs, "%s", cs->dlog); 264 264 } 265 265 } 266 266 ··· 307 307 308 308 t += sprintf(t, "dch_fill_fifo() cnt %d", count); 309 309 QuickHex(t, ptr, count); 310 - debugl1(cs, cs->dlog); 310 + debugl1(cs, "%s", cs->dlog); 311 311 } 312 312 } 313 313 ··· 539 539 540 540 t += sprintf(t, "bch_empty_fifo() B-%d cnt %d", hscx, count); 541 541 QuickHex(t, ptr, count); 542 - debugl1(cs, bcs->blog); 542 + debugl1(cs, "%s", bcs->blog); 543 543 } 544 544 } 545 545 ··· 582 582 583 583 t += sprintf(t, "chb_fill_fifo() B-%d cnt %d", hscx, count); 584 584 QuickHex(t, ptr, count); 585 - debugl1(cs, bcs->blog); 585 + debugl1(cs, "%s", bcs->blog); 586 586 } 587 587 } 588 588
+2 -2
drivers/isdn/hisax/isac.c
··· 137 137 138 138 t += sprintf(t, "isac_empty_fifo cnt %d", count); 139 139 QuickHex(t, ptr, count); 140 - debugl1(cs, cs->dlog); 140 + debugl1(cs, "%s", cs->dlog); 141 141 } 142 142 } 143 143 ··· 179 179 180 180 t += sprintf(t, "isac_fill_fifo cnt %d", count); 181 181 QuickHex(t, ptr, count); 182 - debugl1(cs, cs->dlog); 182 + debugl1(cs, "%s", cs->dlog); 183 183 } 184 184 } 185 185
+3 -3
drivers/isdn/hisax/isar.c
··· 74 74 t = tmp; 75 75 t += sprintf(t, "sendmbox cnt %d", len); 76 76 QuickHex(t, &msg[len-i], (i > 64) ? 64 : i); 77 - debugl1(cs, tmp); 77 + debugl1(cs, "%s", tmp); 78 78 i -= 64; 79 79 } 80 80 } ··· 105 105 t = tmp; 106 106 t += sprintf(t, "rcv_mbox cnt %d", ireg->clsb); 107 107 QuickHex(t, &msg[ireg->clsb - i], (i > 64) ? 64 : i); 108 - debugl1(cs, tmp); 108 + debugl1(cs, "%s", tmp); 109 109 i -= 64; 110 110 } 111 111 } ··· 1248 1248 tp += sprintf(debbuf, "msg iis(%x) msb(%x)", 1249 1249 ireg->iis, ireg->cmsb); 1250 1250 QuickHex(tp, (u_char *)ireg->par, ireg->clsb); 1251 - debugl1(cs, debbuf); 1251 + debugl1(cs, "%s", debbuf); 1252 1252 } 1253 1253 break; 1254 1254 case ISAR_IIS_INVMSG:
+5 -13
drivers/isdn/hisax/jade.c
··· 81 81 int jade = bcs->hw.hscx.hscx; 82 82 83 83 if (cs->debug & L1_DEB_HSCX) { 84 - char tmp[40]; 85 - sprintf(tmp, "jade %c mode %d ichan %d", 86 - 'A' + jade, mode, bc); 87 - debugl1(cs, tmp); 84 + debugl1(cs, "jade %c mode %d ichan %d", 'A' + jade, mode, bc); 88 85 } 89 86 bcs->mode = mode; 90 87 bcs->channel = bc; ··· 254 257 clear_pending_jade_ints(struct IsdnCardState *cs) 255 258 { 256 259 int val; 257 - char tmp[64]; 258 260 259 261 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0x00); 260 262 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0x00); 261 263 262 264 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_ISR); 263 - sprintf(tmp, "jade B ISTA %x", val); 264 - debugl1(cs, tmp); 265 + debugl1(cs, "jade B ISTA %x", val); 265 266 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_ISR); 266 - sprintf(tmp, "jade A ISTA %x", val); 267 - debugl1(cs, tmp); 267 + debugl1(cs, "jade A ISTA %x", val); 268 268 val = cs->BC_Read_Reg(cs, 1, jade_HDLC_STAR); 269 - sprintf(tmp, "jade B STAR %x", val); 270 - debugl1(cs, tmp); 269 + debugl1(cs, "jade B STAR %x", val); 271 270 val = cs->BC_Read_Reg(cs, 0, jade_HDLC_STAR); 272 - sprintf(tmp, "jade A STAR %x", val); 273 - debugl1(cs, tmp); 271 + debugl1(cs, "jade A STAR %x", val); 274 272 /* Unmask ints */ 275 273 cs->BC_Write_Reg(cs, 0, jade_HDLC_IMR, 0xF8); 276 274 cs->BC_Write_Reg(cs, 1, jade_HDLC_IMR, 0xF8);
+2 -2
drivers/isdn/hisax/jade_irq.c
··· 65 65 t += sprintf(t, "jade_empty_fifo %c cnt %d", 66 66 bcs->hw.hscx.hscx ? 'B' : 'A', count); 67 67 QuickHex(t, ptr, count); 68 - debugl1(cs, bcs->blog); 68 + debugl1(cs, "%s", bcs->blog); 69 69 } 70 70 } 71 71 ··· 105 105 t += sprintf(t, "jade_fill_fifo %c cnt %d", 106 106 bcs->hw.hscx.hscx ? 'B' : 'A', count); 107 107 QuickHex(t, ptr, count); 108 - debugl1(cs, bcs->blog); 108 + debugl1(cs, "%s", bcs->blog); 109 109 } 110 110 } 111 111
+18 -32
drivers/isdn/hisax/l3_1tr6.c
··· 63 63 { 64 64 dev_kfree_skb(skb); 65 65 if (pc->st->l3.debug & L3_DEB_WARN) 66 - l3_debug(pc->st, msg); 66 + l3_debug(pc->st, "%s", msg); 67 67 l3_1tr6_release_req(pc, 0, NULL); 68 68 } 69 69 ··· 161 161 { 162 162 u_char *p; 163 163 int bcfound = 0; 164 - char tmp[80]; 165 164 struct sk_buff *skb = arg; 166 165 167 166 /* Channel Identification */ ··· 213 214 /* Signal all services, linklevel takes care of Service-Indicator */ 214 215 if (bcfound) { 215 216 if ((pc->para.setup.si1 != 7) && (pc->st->l3.debug & L3_DEB_WARN)) { 216 - sprintf(tmp, "non-digital call: %s -> %s", 217 + l3_debug(pc->st, "non-digital call: %s -> %s", 217 218 pc->para.setup.phone, 218 219 pc->para.setup.eazmsn); 219 - l3_debug(pc->st, tmp); 220 220 } 221 221 newl3state(pc, 6); 222 222 pc->st->l3.l3l4(pc->st, CC_SETUP | INDICATION, pc); ··· 299 301 { 300 302 u_char *p; 301 303 int i, tmpcharge = 0; 302 - char a_charge[8], tmp[32]; 304 + char a_charge[8]; 303 305 struct sk_buff *skb = arg; 304 306 305 307 p = skb->data; ··· 314 316 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc); 315 317 } 316 318 if (pc->st->l3.debug & L3_DEB_CHARGE) { 317 - sprintf(tmp, "charging info %d", pc->para.chargeinfo); 318 - l3_debug(pc->st, tmp); 319 + l3_debug(pc->st, "charging info %d", 320 + pc->para.chargeinfo); 319 321 } 320 322 } else if (pc->st->l3.debug & L3_DEB_CHARGE) 321 323 l3_debug(pc->st, "charging info not found"); ··· 397 399 struct sk_buff *skb = arg; 398 400 u_char *p; 399 401 int i, tmpcharge = 0; 400 - char a_charge[8], tmp[32]; 402 + char a_charge[8]; 401 403 402 404 StopAllL3Timer(pc); 403 405 p = skb->data; ··· 412 414 pc->st->l3.l3l4(pc->st, CC_CHARGE | INDICATION, pc); 413 415 } 414 416 if (pc->st->l3.debug & L3_DEB_CHARGE) { 415 - sprintf(tmp, "charging info %d", pc->para.chargeinfo); 416 - l3_debug(pc->st, tmp); 417 + l3_debug(pc->st, "charging info %d", 418 + pc->para.chargeinfo); 417 419 } 418 420 } else if (pc->st->l3.debug & L3_DEB_CHARGE) 419 421 l3_debug(pc->st, "charging 
info not found"); ··· 744 746 int i, mt, cr; 745 747 struct l3_process *proc; 746 748 struct sk_buff *skb = arg; 747 - char tmp[80]; 748 749 749 750 switch (pr) { 750 751 case (DL_DATA | INDICATION): ··· 759 762 } 760 763 if (skb->len < 4) { 761 764 if (st->l3.debug & L3_DEB_PROTERR) { 762 - sprintf(tmp, "up1tr6 len only %d", skb->len); 763 - l3_debug(st, tmp); 765 + l3_debug(st, "up1tr6 len only %d", skb->len); 764 766 } 765 767 dev_kfree_skb(skb); 766 768 return; 767 769 } 768 770 if ((skb->data[0] & 0xfe) != PROTO_DIS_N0) { 769 771 if (st->l3.debug & L3_DEB_PROTERR) { 770 - sprintf(tmp, "up1tr6%sunexpected discriminator %x message len %d", 772 + l3_debug(st, "up1tr6%sunexpected discriminator %x message len %d", 771 773 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 772 774 skb->data[0], skb->len); 773 - l3_debug(st, tmp); 774 775 } 775 776 dev_kfree_skb(skb); 776 777 return; 777 778 } 778 779 if (skb->data[1] != 1) { 779 780 if (st->l3.debug & L3_DEB_PROTERR) { 780 - sprintf(tmp, "up1tr6 CR len not 1"); 781 - l3_debug(st, tmp); 781 + l3_debug(st, "up1tr6 CR len not 1"); 782 782 } 783 783 dev_kfree_skb(skb); 784 784 return; ··· 785 791 if (skb->data[0] == PROTO_DIS_N0) { 786 792 dev_kfree_skb(skb); 787 793 if (st->l3.debug & L3_DEB_STATE) { 788 - sprintf(tmp, "up1tr6%s N0 mt %x unhandled", 794 + l3_debug(st, "up1tr6%s N0 mt %x unhandled", 789 795 (pr == (DL_DATA | INDICATION)) ? 
" " : "(broadcast) ", mt); 790 - l3_debug(st, tmp); 791 796 } 792 797 } else if (skb->data[0] == PROTO_DIS_N1) { 793 798 if (!(proc = getl3proc(st, cr))) { ··· 794 801 if (cr < 128) { 795 802 if (!(proc = new_l3_process(st, cr))) { 796 803 if (st->l3.debug & L3_DEB_PROTERR) { 797 - sprintf(tmp, "up1tr6 no roc mem"); 798 - l3_debug(st, tmp); 804 + l3_debug(st, "up1tr6 no roc mem"); 799 805 } 800 806 dev_kfree_skb(skb); 801 807 return; ··· 813 821 } else { 814 822 if (!(proc = new_l3_process(st, cr))) { 815 823 if (st->l3.debug & L3_DEB_PROTERR) { 816 - sprintf(tmp, "up1tr6 no roc mem"); 817 - l3_debug(st, tmp); 824 + l3_debug(st, "up1tr6 no roc mem"); 818 825 } 819 826 dev_kfree_skb(skb); 820 827 return; ··· 828 837 if (i == ARRAY_SIZE(datastln1)) { 829 838 dev_kfree_skb(skb); 830 839 if (st->l3.debug & L3_DEB_STATE) { 831 - sprintf(tmp, "up1tr6%sstate %d mt %x unhandled", 840 + l3_debug(st, "up1tr6%sstate %d mt %x unhandled", 832 841 (pr == (DL_DATA | INDICATION)) ? " " : "(broadcast) ", 833 842 proc->state, mt); 834 - l3_debug(st, tmp); 835 843 } 836 844 return; 837 845 } else { 838 846 if (st->l3.debug & L3_DEB_STATE) { 839 - sprintf(tmp, "up1tr6%sstate %d mt %x", 847 + l3_debug(st, "up1tr6%sstate %d mt %x", 840 848 (pr == (DL_DATA | INDICATION)) ? 
" " : "(broadcast) ", 841 849 proc->state, mt); 842 - l3_debug(st, tmp); 843 850 } 844 851 datastln1[i].rout(proc, pr, skb); 845 852 } ··· 850 861 int i, cr; 851 862 struct l3_process *proc; 852 863 struct Channel *chan; 853 - char tmp[80]; 854 864 855 865 if ((DL_ESTABLISH | REQUEST) == pr) { 856 866 l3_msg(st, pr, NULL); ··· 876 888 break; 877 889 if (i == ARRAY_SIZE(downstl)) { 878 890 if (st->l3.debug & L3_DEB_STATE) { 879 - sprintf(tmp, "down1tr6 state %d prim %d unhandled", 891 + l3_debug(st, "down1tr6 state %d prim %d unhandled", 880 892 proc->state, pr); 881 - l3_debug(st, tmp); 882 893 } 883 894 } else { 884 895 if (st->l3.debug & L3_DEB_STATE) { 885 - sprintf(tmp, "down1tr6 state %d prim %d", 896 + l3_debug(st, "down1tr6 state %d prim %d", 886 897 proc->state, pr); 887 - l3_debug(st, tmp); 888 898 } 889 899 downstl[i].rout(proc, pr, arg); 890 900 }
+1 -1
drivers/isdn/hisax/netjet.c
··· 176 176 else 177 177 j = i; 178 178 QuickHex(t, p, j); 179 - debugl1(cs, tmp); 179 + debugl1(cs, "%s", tmp); 180 180 p += j; 181 181 i -= j; 182 182 t = tmp;
+3 -3
drivers/isdn/hisax/q931.c
··· 1179 1179 dp--; 1180 1180 *dp++ = '\n'; 1181 1181 *dp = 0; 1182 - HiSax_putstatus(cs, NULL, cs->dlog); 1182 + HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1183 1183 } else 1184 1184 HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); 1185 1185 } ··· 1246 1246 } 1247 1247 if (finish) { 1248 1248 *dp = 0; 1249 - HiSax_putstatus(cs, NULL, cs->dlog); 1249 + HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1250 1250 return; 1251 1251 } 1252 1252 if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ ··· 1509 1509 dp += sprintf(dp, "Unknown protocol %x!", buf[0]); 1510 1510 } 1511 1511 *dp = 0; 1512 - HiSax_putstatus(cs, NULL, cs->dlog); 1512 + HiSax_putstatus(cs, NULL, "%s", cs->dlog); 1513 1513 }
+4 -4
drivers/isdn/hisax/w6692.c
··· 154 154 155 155 t += sprintf(t, "W6692_empty_fifo cnt %d", count); 156 156 QuickHex(t, ptr, count); 157 - debugl1(cs, cs->dlog); 157 + debugl1(cs, "%s", cs->dlog); 158 158 } 159 159 } 160 160 ··· 196 196 197 197 t += sprintf(t, "W6692_fill_fifo cnt %d", count); 198 198 QuickHex(t, ptr, count); 199 - debugl1(cs, cs->dlog); 199 + debugl1(cs, "%s", cs->dlog); 200 200 } 201 201 } 202 202 ··· 226 226 t += sprintf(t, "W6692B_empty_fifo %c cnt %d", 227 227 bcs->channel + '1', count); 228 228 QuickHex(t, ptr, count); 229 - debugl1(cs, bcs->blog); 229 + debugl1(cs, "%s", bcs->blog); 230 230 } 231 231 } 232 232 ··· 264 264 t += sprintf(t, "W6692B_fill_fifo %c cnt %d", 265 265 bcs->channel + '1', count); 266 266 QuickHex(t, ptr, count); 267 - debugl1(cs, bcs->blog); 267 + debugl1(cs, "%s", bcs->blog); 268 268 } 269 269 } 270 270
-1
drivers/mailbox/mailbox-omap2.c
··· 325 325 kfree(privblk); 326 326 kfree(mboxblk); 327 327 kfree(list); 328 - platform_set_drvdata(pdev, NULL); 329 328 330 329 return 0; 331 330 }
-7
drivers/mtd/nand/pxa3xx_nand.c
··· 1236 1236 return 0; 1237 1237 } 1238 1238 1239 - #ifdef CONFIG_OF 1240 1239 static struct of_device_id pxa3xx_nand_dt_ids[] = { 1241 1240 { 1242 1241 .compatible = "marvell,pxa3xx-nand", ··· 1283 1284 1284 1285 return 0; 1285 1286 } 1286 - #else 1287 - static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev) 1288 - { 1289 - return 0; 1290 - } 1291 - #endif 1292 1287 1293 1288 static int pxa3xx_nand_probe(struct platform_device *pdev) 1294 1289 {
+1 -1
drivers/net/bonding/bond_alb.c
··· 1472 1472 bond_info->lp_counter++; 1473 1473 1474 1474 /* send learning packets */ 1475 - if (bond_info->lp_counter >= BOND_ALB_LP_TICKS) { 1475 + if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { 1476 1476 /* change of curr_active_slave involves swapping of mac addresses. 1477 1477 * in order to avoid this swapping from happening while 1478 1478 * sending the learning packets, the curr_slave_lock must be held for
+5 -4
drivers/net/bonding/bond_alb.h
··· 36 36 * Used for division - never set 37 37 * to zero !!! 38 38 */ 39 - #define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of 40 - * learning packets to the switch 41 - */ 39 + #define BOND_ALB_DEFAULT_LP_INTERVAL 1 40 + #define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of 41 + * learning packets to the switch 42 + */ 42 43 43 44 #define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ 44 45 * ALB_TIMER_TICKS_PER_SEC) 45 46 46 - #define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \ 47 + #define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \ 47 48 * ALB_TIMER_TICKS_PER_SEC) 48 49 49 50 #define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
+1
drivers/net/bonding/bond_main.c
··· 4416 4416 params->all_slaves_active = all_slaves_active; 4417 4417 params->resend_igmp = resend_igmp; 4418 4418 params->min_links = min_links; 4419 + params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL; 4419 4420 4420 4421 if (primary) { 4421 4422 strncpy(params->primary, primary, IFNAMSIZ);
+39
drivers/net/bonding/bond_sysfs.c
··· 1699 1699 static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR, 1700 1700 bonding_show_resend_igmp, bonding_store_resend_igmp); 1701 1701 1702 + 1703 + static ssize_t bonding_show_lp_interval(struct device *d, 1704 + struct device_attribute *attr, 1705 + char *buf) 1706 + { 1707 + struct bonding *bond = to_bond(d); 1708 + return sprintf(buf, "%d\n", bond->params.lp_interval); 1709 + } 1710 + 1711 + static ssize_t bonding_store_lp_interval(struct device *d, 1712 + struct device_attribute *attr, 1713 + const char *buf, size_t count) 1714 + { 1715 + struct bonding *bond = to_bond(d); 1716 + int new_value, ret = count; 1717 + 1718 + if (sscanf(buf, "%d", &new_value) != 1) { 1719 + pr_err("%s: no lp interval value specified.\n", 1720 + bond->dev->name); 1721 + ret = -EINVAL; 1722 + goto out; 1723 + } 1724 + 1725 + if (new_value <= 0) { 1726 + pr_err ("%s: lp_interval must be between 1 and %d\n", 1727 + bond->dev->name, INT_MAX); 1728 + ret = -EINVAL; 1729 + goto out; 1730 + } 1731 + 1732 + bond->params.lp_interval = new_value; 1733 + out: 1734 + return ret; 1735 + } 1736 + 1737 + static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR, 1738 + bonding_show_lp_interval, bonding_store_lp_interval); 1739 + 1702 1740 static struct attribute *per_bond_attrs[] = { 1703 1741 &dev_attr_slaves.attr, 1704 1742 &dev_attr_mode.attr, ··· 1767 1729 &dev_attr_all_slaves_active.attr, 1768 1730 &dev_attr_resend_igmp.attr, 1769 1731 &dev_attr_min_links.attr, 1732 + &dev_attr_lp_interval.attr, 1770 1733 NULL, 1771 1734 }; 1772 1735
+1
drivers/net/bonding/bonding.h
··· 176 176 int tx_queues; 177 177 int all_slaves_active; 178 178 int resend_igmp; 179 + int lp_interval; 179 180 }; 180 181 181 182 struct bond_parm_tbl {
+2 -2
drivers/net/ethernet/adi/bfin_mac.c
··· 530 530 if (lp->wol && !lp->irq_wake_requested) { 531 531 /* register wake irq handler */ 532 532 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt, 533 - IRQF_DISABLED, "EMAC_WAKE", dev); 533 + 0, "EMAC_WAKE", dev); 534 534 if (rc) 535 535 return rc; 536 536 lp->irq_wake_requested = true; ··· 1686 1686 /* now, enable interrupts */ 1687 1687 /* register irq handler */ 1688 1688 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt, 1689 - IRQF_DISABLED, "EMAC_RX", ndev); 1689 + 0, "EMAC_RX", ndev); 1690 1690 if (rc) { 1691 1691 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n"); 1692 1692 rc = -EBUSY;
+1 -1
drivers/net/ethernet/amd/sun3lance.c
··· 358 358 359 359 REGA(CSR0) = CSR0_STOP; 360 360 361 - if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) { 361 + if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) { 362 362 #ifdef CONFIG_SUN3 363 363 iounmap((void __iomem *)ioaddr); 364 364 #endif
+2 -7
drivers/net/ethernet/atheros/alx/main.c
··· 1188 1188 struct alx_priv *alx; 1189 1189 struct alx_hw *hw; 1190 1190 bool phy_configured; 1191 - int bars, pm_cap, err; 1191 + int bars, err; 1192 1192 1193 1193 err = pci_enable_device_mem(pdev); 1194 1194 if (err) ··· 1225 1225 pci_enable_pcie_error_reporting(pdev); 1226 1226 pci_set_master(pdev); 1227 1227 1228 - pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 1229 - if (pm_cap == 0) { 1228 + if (!pdev->pm_cap) { 1230 1229 dev_err(&pdev->dev, 1231 1230 "Can't find power management capability, aborting\n"); 1232 1231 err = -EIO; 1233 1232 goto out_pci_release; 1234 1233 } 1235 - 1236 - err = pci_set_power_state(pdev, PCI_D0); 1237 - if (err) 1238 - goto out_pci_release; 1239 1234 1240 1235 netdev = alloc_etherdev(sizeof(*alx)); 1241 1236 if (!netdev) {
+31 -13
drivers/net/ethernet/broadcom/bgmac.c
··· 157 157 if (++ring->end >= BGMAC_TX_RING_SLOTS) 158 158 ring->end = 0; 159 159 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, 160 + ring->index_base + 160 161 ring->end * sizeof(struct bgmac_dma_desc)); 161 162 162 163 /* Always keep one slot free to allow detecting bugged calls. */ ··· 181 180 182 181 /* The last slot that hardware didn't consume yet */ 183 182 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); 183 + empty_slot &= BGMAC_DMA_TX_STATDPTR; 184 + empty_slot -= ring->index_base; 184 185 empty_slot &= BGMAC_DMA_TX_STATDPTR; 185 186 empty_slot /= sizeof(struct bgmac_dma_desc); 186 187 ··· 276 273 int handled = 0; 277 274 278 275 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS); 276 + end_slot &= BGMAC_DMA_RX_STATDPTR; 277 + end_slot -= ring->index_base; 279 278 end_slot &= BGMAC_DMA_RX_STATDPTR; 280 279 end_slot /= sizeof(struct bgmac_dma_desc); 281 280 ··· 423 418 ring = &bgmac->tx_ring[i]; 424 419 ring->num_slots = BGMAC_TX_RING_SLOTS; 425 420 ring->mmio_base = ring_base[i]; 426 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX)) 427 - bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n", 428 - ring->mmio_base); 429 421 430 422 /* Alloc ring of descriptors */ 431 423 size = ring->num_slots * sizeof(struct bgmac_dma_desc); ··· 437 435 if (ring->dma_base & 0xC0000000) 438 436 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); 439 437 438 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring, 439 + BGMAC_DMA_RING_TX); 440 + if (ring->unaligned) 441 + ring->index_base = lower_32_bits(ring->dma_base); 442 + else 443 + ring->index_base = 0; 444 + 440 445 /* No need to alloc TX slots yet */ 441 446 } 442 447 ··· 453 444 ring = &bgmac->rx_ring[i]; 454 445 ring->num_slots = BGMAC_RX_RING_SLOTS; 455 446 ring->mmio_base = ring_base[i]; 456 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX)) 457 - 
bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n", 458 - ring->mmio_base); 459 447 460 448 /* Alloc ring of descriptors */ 461 449 size = ring->num_slots * sizeof(struct bgmac_dma_desc); ··· 467 461 } 468 462 if (ring->dma_base & 0xC0000000) 469 463 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); 464 + 465 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring, 466 + BGMAC_DMA_RING_RX); 467 + if (ring->unaligned) 468 + ring->index_base = lower_32_bits(ring->dma_base); 469 + else 470 + ring->index_base = 0; 470 471 471 472 /* Alloc RX slots */ 472 473 for (j = 0; j < ring->num_slots; j++) { ··· 502 489 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { 503 490 ring = &bgmac->tx_ring[i]; 504 491 505 - /* We don't implement unaligned addressing, so enable first */ 506 - bgmac_dma_tx_enable(bgmac, ring); 492 + if (!ring->unaligned) 493 + bgmac_dma_tx_enable(bgmac, ring); 507 494 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, 508 495 lower_32_bits(ring->dma_base)); 509 496 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI, 510 497 upper_32_bits(ring->dma_base)); 498 + if (ring->unaligned) 499 + bgmac_dma_tx_enable(bgmac, ring); 511 500 512 501 ring->start = 0; 513 502 ring->end = 0; /* Points the slot that should *not* be read */ ··· 520 505 521 506 ring = &bgmac->rx_ring[i]; 522 507 523 - /* We don't implement unaligned addressing, so enable first */ 524 - bgmac_dma_rx_enable(bgmac, ring); 508 + if (!ring->unaligned) 509 + bgmac_dma_rx_enable(bgmac, ring); 525 510 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, 526 511 lower_32_bits(ring->dma_base)); 527 512 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI, 528 513 upper_32_bits(ring->dma_base)); 514 + if (ring->unaligned) 515 + bgmac_dma_rx_enable(bgmac, ring); 529 516 530 517 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots; 531 518 j++, dma_desc++) { ··· 548 531 } 549 532 550 533 
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, 534 + ring->index_base + 551 535 ring->num_slots * sizeof(struct bgmac_dma_desc)); 552 536 553 537 ring->start = 0; ··· 926 908 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; 927 909 u8 et_swtype = 0; 928 910 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | 929 - BGMAC_CHIPCTL_1_IF_TYPE_RMII; 930 - char buf[2]; 911 + BGMAC_CHIPCTL_1_IF_TYPE_MII; 912 + char buf[4]; 931 913 932 - if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) { 914 + if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { 933 915 if (kstrtou8(buf, 0, &et_swtype)) 934 916 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", 935 917 buf);
+3 -1
drivers/net/ethernet/broadcom/bgmac.h
··· 333 333 334 334 #define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030 335 335 #define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000 336 - #define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010 336 + #define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010 337 337 #define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020 338 338 #define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0 339 339 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000 ··· 384 384 u16 mmio_base; 385 385 struct bgmac_dma_desc *cpu_base; 386 386 dma_addr_t dma_base; 387 + u32 index_base; /* Used for unaligned rings only, otherwise 0 */ 388 + bool unaligned; 387 389 388 390 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS]; 389 391 };
+33 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 246 246 BNX2X_MAX_CNIC_ETH_CL_ID_IDX, 247 247 }; 248 248 249 - #define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\ 249 + /* use a value high enough to be above all the PFs, which has least significant 250 + * nibble as 8, so when cnic needs to come up with a CID for UIO to use to 251 + * calculate doorbell address according to old doorbell configuration scheme 252 + * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number 253 + * We must avoid coming up with cid 8 for iscsi since according to this method 254 + * the designated UIO cid will come out 0 and it has a special handling for that 255 + * case which doesn't suit us. Therefore will will cieling to closes cid which 256 + * has least signigifcant nibble 8 and if it is 8 we will move forward to 0x18. 257 + */ 258 + 259 + #define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \ 250 260 (bp)->max_cos) 261 + /* amount of cids traversed by UIO's DPM addition to doorbell */ 262 + #define UIO_DPM 8 263 + /* roundup to DPM offset */ 264 + #define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \ 265 + UIO_DPM)) 266 + /* offset to nearest value which has lsb nibble matching DPM */ 267 + #define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \ 268 + (UIO_DPM * 2)) 269 + /* add offset to rounded-up cid to get a value which could be used with UIO */ 270 + #define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp)) 271 + /* but wait - avoid UIO special case for cid 0 */ 272 + #define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \ 273 + (UIO_DPM_ALIGN(bp) == UIO_DPM)) 274 + /* Properly DPM aligned CID dajusted to cid 0 secal case */ 275 + #define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \ 276 + (UIO_DPM_CID0_OFFSET(bp))) 277 + /* how many cids were wasted - need this value for cid allocation */ 278 + #define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \ 279 + BNX2X_1st_NON_L2_ETH_CID(bp)) 251 280 /* iSCSI L2 */ 252 281 #define 
BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) 253 282 /* FCoE L2 */ ··· 1571 1542 */ 1572 1543 bool fcoe_init; 1573 1544 1574 - int pm_cap; 1575 1545 int mrrs; 1576 1546 1577 1547 struct delayed_work sp_task; ··· 1709 1681 * Maximum CID count that might be required by the bnx2x: 1710 1682 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI 1711 1683 */ 1684 + 1712 1685 #define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ 1713 - + 2 * CNIC_SUPPORT(bp)) 1686 + + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) 1714 1687 #define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ 1715 - + 2 * CNIC_SUPPORT(bp)) 1688 + + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) 1716 1689 #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1717 1690 ILT_PAGE_CIDS)) 1718 1691
+4 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 3008 3008 u16 pmcsr; 3009 3009 3010 3010 /* If there is no power capability, silently succeed */ 3011 - if (!bp->pm_cap) { 3011 + if (!bp->pdev->pm_cap) { 3012 3012 BNX2X_DEV_INFO("No power capability. Breaking.\n"); 3013 3013 return 0; 3014 3014 } 3015 3015 3016 - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 3016 + pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); 3017 3017 3018 3018 switch (state) { 3019 3019 case PCI_D0: 3020 - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 3020 + pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, 3021 3021 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3022 3022 PCI_PM_CTRL_PME_STATUS)); 3023 3023 ··· 3041 3041 if (bp->wol) 3042 3042 pmcsr |= PCI_PM_CTRL_PME_ENABLE; 3043 3043 3044 - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 3044 + pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, 3045 3045 pmcsr); 3046 3046 3047 3047 /* No more memory access after this point until
+2 -2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
··· 1387 1387 u16 pm = 0; 1388 1388 struct net_device *dev = pci_get_drvdata(bp->pdev); 1389 1389 1390 - if (bp->pm_cap) 1390 + if (bp->pdev->pm_cap) 1391 1391 rc = pci_read_config_word(bp->pdev, 1392 - bp->pm_cap + PCI_PM_CTRL, &pm); 1392 + bp->pdev->pm_cap + PCI_PM_CTRL, &pm); 1393 1393 1394 1394 if ((rc && !netif_running(dev)) || 1395 1395 (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
+9 -5
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 8652 8652 else if (bp->wol) { 8653 8653 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 8654 8654 u8 *mac_addr = bp->dev->dev_addr; 8655 + struct pci_dev *pdev = bp->pdev; 8655 8656 u32 val; 8656 8657 u16 pmc; 8657 8658 ··· 8669 8668 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 8670 8669 8671 8670 /* Enable the PME and clear the status */ 8672 - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); 8671 + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); 8673 8672 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 8674 - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); 8673 + pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); 8675 8674 8676 8675 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 8677 8676 ··· 10400 10399 break; 10401 10400 } 10402 10401 10403 - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 10402 + pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); 10404 10403 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 10405 10404 10406 10405 BNX2X_DEV_INFO("%sWoL capable\n", ··· 12142 12141 } 12143 12142 12144 12143 if (IS_PF(bp)) { 12145 - bp->pm_cap = pdev->pm_cap; 12146 - if (bp->pm_cap == 0) { 12144 + if (!pdev->pm_cap) { 12147 12145 dev_err(&bp->pdev->dev, 12148 12146 "Cannot find power management capability, aborting\n"); 12149 12147 rc = -EIO; ··· 13631 13631 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 13632 13632 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 13633 13633 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 13634 + 13635 + DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", 13636 + BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, 13637 + cp->iscsi_l2_cid); 13634 13638 13635 13639 if (NO_ISCSI_OOO(bp)) 13636 13640 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+4 -2
drivers/net/ethernet/broadcom/cnic.c
··· 3135 3135 { 3136 3136 struct cnic_dev *dev = (struct cnic_dev *) data; 3137 3137 struct cnic_local *cp = dev->cnic_priv; 3138 + struct bnx2x *bp = netdev_priv(dev->netdev); 3138 3139 u32 status_idx, new_status_idx; 3139 3140 3140 3141 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) ··· 3147 3146 CNIC_WR16(dev, cp->kcq1.io_addr, 3148 3147 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3149 3148 3150 - if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) { 3149 + if (!CNIC_SUPPORTS_FCOE(bp)) { 3151 3150 cp->arm_int(dev, status_idx); 3152 3151 break; 3153 3152 } ··· 5218 5217 "iSCSI CLIENT_SETUP did not complete\n"); 5219 5218 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5220 5219 cnic_ring_ctl(dev, cid, cli, 1); 5221 - *cid_ptr = cid; 5220 + *cid_ptr = cid >> 4; 5221 + *(cid_ptr + 1) = cid * bp->db_size; 5222 5222 } 5223 5223 } 5224 5224
+3 -3
drivers/net/ethernet/broadcom/tg3.c
··· 3034 3034 { 3035 3035 switch (tg3_asic_rev(tp)) { 3036 3036 case ASIC_REV_5719: 3037 + case ASIC_REV_5720: 3037 3038 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && 3038 3039 !tp->pci_fn) 3039 3040 return true; ··· 16193 16192 * So explicitly force the chip into D0 here. 16194 16193 */ 16195 16194 pci_read_config_dword(tp->pdev, 16196 - tp->pm_cap + PCI_PM_CTRL, 16195 + tp->pdev->pm_cap + PCI_PM_CTRL, 16197 16196 &pm_reg); 16198 16197 pm_reg &= ~PCI_PM_CTRL_STATE_MASK; 16199 16198 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; 16200 16199 pci_write_config_dword(tp->pdev, 16201 - tp->pm_cap + PCI_PM_CTRL, 16200 + tp->pdev->pm_cap + PCI_PM_CTRL, 16202 16201 pm_reg); 16203 16202 16204 16203 /* Also, force SERR#/PERR# in PCI command. */ ··· 17347 17346 tp = netdev_priv(dev); 17348 17347 tp->pdev = pdev; 17349 17348 tp->dev = dev; 17350 - tp->pm_cap = pdev->pm_cap; 17351 17349 tp->rx_mode = TG3_DEF_RX_MODE; 17352 17350 tp->tx_mode = TG3_DEF_TX_MODE; 17353 17351 tp->irq_sync = 1;
-1
drivers/net/ethernet/broadcom/tg3.h
··· 3234 3234 u8 pci_lat_timer; 3235 3235 3236 3236 int pci_fn; 3237 - int pm_cap; 3238 3237 int msi_cap; 3239 3238 int pcix_cap; 3240 3239 int pcie_readrq;
+3 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 6149 6149 pr_warn("could not create debugfs entry, continuing\n"); 6150 6150 6151 6151 ret = pci_register_driver(&cxgb4_driver); 6152 - if (ret < 0) 6152 + if (ret < 0) { 6153 6153 debugfs_remove(cxgb4_debugfs_root); 6154 + destroy_workqueue(workq); 6155 + } 6154 6156 6155 6157 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6156 6158
+1 -1
drivers/net/ethernet/dec/tulip/de4x5.c
··· 1321 1321 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, 1322 1322 lp->adapter_name, dev)) { 1323 1323 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); 1324 - if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED, 1324 + if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, 1325 1325 lp->adapter_name, dev)) { 1326 1326 printk("\n Cannot get IRQ- reconfigure your hardware.\n"); 1327 1327 disable_ast(dev);
+1 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 2802 2802 struct be_resources res = {0}; 2803 2803 struct be_vf_cfg *vf_cfg; 2804 2804 u32 cap_flags, en_flags, vf; 2805 - int status; 2805 + int status = 0; 2806 2806 2807 2807 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2808 2808 BE_IF_FLAGS_MULTICAST;
+1 -1
drivers/net/ethernet/freescale/fec_main.c
··· 2199 2199 goto failed_irq; 2200 2200 } 2201 2201 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 2202 - IRQF_DISABLED, pdev->name, ndev); 2202 + 0, pdev->name, ndev); 2203 2203 if (ret) 2204 2204 goto failed_irq; 2205 2205 }
+1 -1
drivers/net/ethernet/hp/hp100.c
··· 1097 1097 /* New: if bus is PCI or EISA, interrupts might be shared interrupts */ 1098 1098 if (request_irq(dev->irq, hp100_interrupt, 1099 1099 lp->bus == HP100_BUS_PCI || lp->bus == 1100 - HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED, 1100 + HP100_BUS_EISA ? IRQF_SHARED : 0, 1101 1101 "hp100", dev)) { 1102 1102 printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq); 1103 1103 return -EAGAIN;
+16 -5
drivers/net/ethernet/ibm/ehea/ehea_main.c
··· 102 102 103 103 static int ehea_remove(struct platform_device *dev); 104 104 105 + static struct of_device_id ehea_module_device_table[] = { 106 + { 107 + .name = "lhea", 108 + .compatible = "IBM,lhea", 109 + }, 110 + { 111 + .type = "network", 112 + .compatible = "IBM,lhea-ethernet", 113 + }, 114 + {}, 115 + }; 116 + MODULE_DEVICE_TABLE(of, ehea_module_device_table); 117 + 105 118 static struct of_device_id ehea_device_table[] = { 106 119 { 107 120 .name = "lhea", ··· 122 109 }, 123 110 {}, 124 111 }; 125 - MODULE_DEVICE_TABLE(of, ehea_device_table); 126 112 127 113 static struct platform_driver ehea_driver = { 128 114 .driver = { ··· 1297 1285 1298 1286 ret = ibmebus_request_irq(port->qp_eq->attr.ist1, 1299 1287 ehea_qp_aff_irq_handler, 1300 - IRQF_DISABLED, port->int_aff_name, port); 1288 + 0, port->int_aff_name, port); 1301 1289 if (ret) { 1302 1290 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n", 1303 1291 port->qp_eq->attr.ist1); ··· 1315 1303 "%s-queue%d", dev->name, i); 1316 1304 ret = ibmebus_request_irq(pr->eq->attr.ist1, 1317 1305 ehea_recv_irq_handler, 1318 - IRQF_DISABLED, pr->int_send_name, 1319 - pr); 1306 + 0, pr->int_send_name, pr); 1320 1307 if (ret) { 1321 1308 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", 1322 1309 i, pr->eq->attr.ist1); ··· 3331 3320 } 3332 3321 3333 3322 ret = ibmebus_request_irq(adapter->neq->attr.ist1, 3334 - ehea_interrupt_neq, IRQF_DISABLED, 3323 + ehea_interrupt_neq, 0, 3335 3324 "ehea_neq", adapter); 3336 3325 if (ret) { 3337 3326 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
+8
drivers/net/ethernet/intel/e1000e/ethtool.c
··· 922 922 else 923 923 mask &= ~(1 << 30); 924 924 } 925 + if (mac->type == e1000_pch2lan) { 926 + /* SHRAH[0,1,2] different than previous */ 927 + if (i == 7) 928 + mask &= 0xFFF4FFFF; 929 + /* SHRAH[3] different than SHRAH[0,1,2] */ 930 + if (i == 10) 931 + mask |= (1 << 30); 932 + } 925 933 926 934 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, 927 935 0xFFFFFFFF);
+8 -5
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 1371 1371 return; 1372 1372 } 1373 1373 1374 - if (index < hw->mac.rar_entry_count) { 1374 + /* RAR[1-6] are owned by manageability. Skip those and program the 1375 + * next address into the SHRA register array. 1376 + */ 1377 + if (index < (u32)(hw->mac.rar_entry_count - 6)) { 1375 1378 s32 ret_val; 1376 1379 1377 1380 ret_val = e1000_acquire_swflag_ich8lan(hw); ··· 1965 1962 if (ret_val) 1966 1963 goto release; 1967 1964 1968 - /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ 1969 - for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 1965 + /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ 1966 + for (i = 0; i < (hw->mac.rar_entry_count); i++) { 1970 1967 mac_reg = er32(RAL(i)); 1971 1968 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), 1972 1969 (u16)(mac_reg & 0xFFFF)); ··· 2010 2007 return ret_val; 2011 2008 2012 2009 if (enable) { 2013 - /* Write Rx addresses (rar_entry_count for RAL/H, +4 for 2010 + /* Write Rx addresses (rar_entry_count for RAL/H, and 2014 2011 * SHRAL/H) and initial CRC values to the MAC 2015 2012 */ 2016 - for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 2013 + for (i = 0; i < hw->mac.rar_entry_count; i++) { 2017 2014 u8 mac_addr[ETH_ALEN] = { 0 }; 2018 2015 u32 addr_high, addr_low; 2019 2016
+1 -1
drivers/net/ethernet/intel/e1000e/ich8lan.h
··· 98 98 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 99 99 100 100 #define E1000_ICH_RAR_ENTRIES 7 101 - #define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ 101 + #define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */ 102 102 #define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ 103 103 104 104 #define PHY_PAGE_SHIFT 5
+1 -1
drivers/net/ethernet/intel/e1000e/netdev.c
··· 4868 4868 */ 4869 4869 if ((hw->phy.type == e1000_phy_igp_3 || 4870 4870 hw->phy.type == e1000_phy_bm) && 4871 - (hw->mac.autoneg == true) && 4871 + hw->mac.autoneg && 4872 4872 (adapter->link_speed == SPEED_10 || 4873 4873 adapter->link_speed == SPEED_100) && 4874 4874 (adapter->link_duplex == HALF_DUPLEX)) {
+4
drivers/net/ethernet/intel/igb/e1000_82575.c
··· 719 719 u32 ctrl_ext; 720 720 u32 mdic; 721 721 722 + /* Extra read required for some PHY's on i354 */ 723 + if (hw->mac.type == e1000_i354) 724 + igb_get_phy_id(hw); 725 + 722 726 /* For SGMII PHYs, we try the list of possible addresses until 723 727 * we find one that works. For non-SGMII PHYs 724 728 * (e.g. integrated copper PHYs), an address of 1 should
+9 -1
drivers/net/ethernet/intel/igb/e1000_mac.c
··· 712 712 static s32 igb_set_default_fc(struct e1000_hw *hw) 713 713 { 714 714 s32 ret_val = 0; 715 + u16 lan_offset; 715 716 u16 nvm_data; 716 717 717 718 /* Read and store word 0x0F of the EEPROM. This word contains bits ··· 723 722 * control setting, then the variable hw->fc will 724 723 * be initialized based on a value in the EEPROM. 725 724 */ 726 - ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); 725 + if (hw->mac.type == e1000_i350) { 726 + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); 727 + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG 728 + + lan_offset, 1, &nvm_data); 729 + } else { 730 + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 731 + 1, &nvm_data); 732 + } 727 733 728 734 if (ret_val) { 729 735 hw_dbg("NVM Read Error\n");
+25
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 160 160 bool autoneg = false; 161 161 bool link_up; 162 162 163 + /* SFP type is needed for get_link_capabilities */ 164 + if (hw->phy.media_type & (ixgbe_media_type_fiber | 165 + ixgbe_media_type_fiber_qsfp)) { 166 + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) 167 + hw->phy.ops.identify_sfp(hw); 168 + } 169 + 163 170 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); 164 171 165 172 /* set the supported link speeds */ ··· 193 186 ecmd->advertising |= ADVERTISED_1000baseT_Full; 194 187 if (supported_link & IXGBE_LINK_SPEED_100_FULL) 195 188 ecmd->advertising |= ADVERTISED_100baseT_Full; 189 + 190 + if (hw->phy.multispeed_fiber && !autoneg) { 191 + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) 192 + ecmd->advertising = ADVERTISED_10000baseT_Full; 193 + } 196 194 } 197 195 198 196 if (autoneg) { ··· 325 313 */ 326 314 if (ecmd->advertising & ~ecmd->supported) 327 315 return -EINVAL; 316 + 317 + /* only allow one speed at a time if no autoneg */ 318 + if (!ecmd->autoneg && hw->phy.multispeed_fiber) { 319 + if (ecmd->advertising == 320 + (ADVERTISED_10000baseT_Full | 321 + ADVERTISED_1000baseT_Full)) 322 + return -EINVAL; 323 + } 328 324 329 325 old = hw->phy.autoneg_advertised; 330 326 advertised = 0; ··· 1825 1805 unsigned int size = 1024; 1826 1806 netdev_tx_t tx_ret_val; 1827 1807 struct sk_buff *skb; 1808 + u32 flags_orig = adapter->flags; 1809 + 1810 + /* DCB can modify the frames on Tx */ 1811 + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 1828 1812 1829 1813 /* allocate test skb */ 1830 1814 skb = alloc_skb(size, GFP_KERNEL); ··· 1881 1857 1882 1858 /* free the original skb */ 1883 1859 kfree_skb(skb); 1860 + adapter->flags = flags_orig; 1884 1861 1885 1862 return ret_val; 1886 1863 }
+17 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 3571 3571 { 3572 3572 struct ixgbe_hw *hw = &adapter->hw; 3573 3573 int i; 3574 - u32 rxctrl; 3574 + u32 rxctrl, rfctl; 3575 3575 3576 3576 /* disable receives while setting up the descriptors */ 3577 3577 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ··· 3579 3579 3580 3580 ixgbe_setup_psrtype(adapter); 3581 3581 ixgbe_setup_rdrxctl(adapter); 3582 + 3583 + /* RSC Setup */ 3584 + rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); 3585 + rfctl &= ~IXGBE_RFCTL_RSC_DIS; 3586 + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) 3587 + rfctl |= IXGBE_RFCTL_RSC_DIS; 3588 + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); 3582 3589 3583 3590 /* Program registers for the distribution of queues */ 3584 3591 ixgbe_setup_mrqc(adapter); ··· 6000 5993 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 6001 5994 6002 5995 speed = hw->phy.autoneg_advertised; 6003 - if ((!speed) && (hw->mac.ops.get_link_capabilities)) 5996 + if ((!speed) && (hw->mac.ops.get_link_capabilities)) { 6004 5997 hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); 5998 + 5999 + /* setup the highest link when no autoneg */ 6000 + if (!autoneg) { 6001 + if (speed & IXGBE_LINK_SPEED_10GB_FULL) 6002 + speed = IXGBE_LINK_SPEED_10GB_FULL; 6003 + } 6004 + } 6005 + 6005 6006 if (hw->mac.ops.setup_link) 6006 6007 hw->mac.ops.setup_link(hw, speed, true); 6007 6008
+1
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
··· 1861 1861 #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 1862 1862 #define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E 1863 1863 #define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 1864 + #define IXGBE_RFCTL_RSC_DIS 0x00000020 1864 1865 #define IXGBE_RFCTL_NFSW_DIS 0x00000040 1865 1866 #define IXGBE_RFCTL_NFSR_DIS 0x00000080 1866 1867 #define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
+2 -4
drivers/net/ethernet/lantiq_etop.c
··· 282 282 283 283 if (IS_TX(i)) { 284 284 ltq_dma_alloc_tx(&ch->dma); 285 - request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, 286 - "etop_tx", priv); 285 + request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); 287 286 } else if (IS_RX(i)) { 288 287 ltq_dma_alloc_rx(&ch->dma); 289 288 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; ··· 290 291 if (ltq_etop_alloc_skb(ch)) 291 292 return -ENOMEM; 292 293 ch->dma.desc = 0; 293 - request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, 294 - "etop_rx", priv); 294 + request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); 295 295 } 296 296 ch->dma.irq = irq; 297 297 }
+1 -2
drivers/net/ethernet/marvell/pxa168_eth.c
··· 1123 1123 struct pxa168_eth_private *pep = netdev_priv(dev); 1124 1124 int err; 1125 1125 1126 - err = request_irq(dev->irq, pxa168_eth_int_handler, 1127 - IRQF_DISABLED, dev->name, dev); 1126 + err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); 1128 1127 if (err) { 1129 1128 dev_err(&dev->dev, "can't assign irq\n"); 1130 1129 return -EAGAIN;
+3 -2
drivers/net/ethernet/marvell/skge.c
··· 3092 3092 if (!nskb) 3093 3093 goto resubmit; 3094 3094 3095 + skb = e->skb; 3096 + prefetch(skb->data); 3097 + 3095 3098 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { 3096 3099 dev_kfree_skb(nskb); 3097 3100 goto resubmit; ··· 3104 3101 dma_unmap_addr(e, mapaddr), 3105 3102 dma_unmap_len(e, maplen), 3106 3103 PCI_DMA_FROMDEVICE); 3107 - skb = e->skb; 3108 - prefetch(skb->data); 3109 3104 } 3110 3105 3111 3106 skb_put(skb, len);
+10 -6
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 53 53 for (i = 0; i < priv->tx_ring_num; i++) { 54 54 priv->tx_cq[i].moder_cnt = priv->tx_frames; 55 55 priv->tx_cq[i].moder_time = priv->tx_usecs; 56 - err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]); 57 - if (err) 58 - return err; 56 + if (priv->port_up) { 57 + err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]); 58 + if (err) 59 + return err; 60 + } 59 61 } 60 62 61 63 if (priv->adaptive_rx_coal) ··· 67 65 priv->rx_cq[i].moder_cnt = priv->rx_frames; 68 66 priv->rx_cq[i].moder_time = priv->rx_usecs; 69 67 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 70 - err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]); 71 - if (err) 72 - return err; 68 + if (priv->port_up) { 69 + err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]); 70 + if (err) 71 + return err; 72 + } 73 73 } 74 74 75 75 return err;
+1 -1
drivers/net/ethernet/micrel/ks8851_mll.c
··· 915 915 struct ks_net *ks = netdev_priv(netdev); 916 916 int err; 917 917 918 - #define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW) 918 + #define KS_INT_FLAGS IRQF_TRIGGER_LOW 919 919 /* lock the card, even if we may not actually do anything 920 920 * else at the moment. 921 921 */
+1 -2
drivers/net/ethernet/natsemi/jazzsonic.c
··· 83 83 { 84 84 int retval; 85 85 86 - retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, 87 - "sonic", dev); 86 + retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); 88 87 if (retval) { 89 88 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 90 89 dev->name, dev->irq);
+1 -2
drivers/net/ethernet/natsemi/xtsonic.c
··· 95 95 { 96 96 int retval; 97 97 98 - retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, 99 - "sonic", dev); 98 + retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); 100 99 if (retval) { 101 100 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 102 101 dev->name, dev->irq);
+2 -2
drivers/net/ethernet/pasemi/pasemi_mac.c
··· 1219 1219 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", 1220 1220 dev->name); 1221 1221 1222 - ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED, 1222 + ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0, 1223 1223 mac->tx_irq_name, mac->tx); 1224 1224 if (ret) { 1225 1225 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", ··· 1230 1230 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", 1231 1231 dev->name); 1232 1232 1233 - ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED, 1233 + ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0, 1234 1234 mac->rx_irq_name, mac->rx); 1235 1235 if (ret) { 1236 1236 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
+1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
··· 1561 1561 { 1562 1562 int err; 1563 1563 1564 + adapter->need_fw_reset = 0; 1564 1565 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); 1565 1566 qlcnic_83xx_enable_mbx_interrupt(adapter); 1566 1567
+1
drivers/net/ethernet/realtek/r8169.c
··· 4231 4231 case RTL_GIGA_MAC_VER_23: 4232 4232 case RTL_GIGA_MAC_VER_24: 4233 4233 case RTL_GIGA_MAC_VER_34: 4234 + case RTL_GIGA_MAC_VER_35: 4234 4235 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4235 4236 break; 4236 4237 case RTL_GIGA_MAC_VER_40:
+1 -1
drivers/net/ethernet/sfc/Kconfig
··· 7 7 select I2C_ALGOBIT 8 8 select PTP_1588_CLOCK 9 9 ---help--- 10 - This driver supports 10-gigabit Ethernet cards based on 10 + This driver supports 10/40-gigabit Ethernet cards based on 11 11 the Solarflare SFC4000, SFC9000-family and SFC9100-family 12 12 controllers. 13 13
+37 -21
drivers/net/ethernet/sfc/ef10.c
··· 94 94 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); 95 95 } 96 96 97 - static int efx_ef10_init_capabilities(struct efx_nic *efx) 97 + static int efx_ef10_init_datapath_caps(struct efx_nic *efx) 98 98 { 99 99 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); 100 100 struct efx_ef10_nic_data *nic_data = efx->nic_data; ··· 107 107 outbuf, sizeof(outbuf), &outlen); 108 108 if (rc) 109 109 return rc; 110 + if (outlen < sizeof(outbuf)) { 111 + netif_err(efx, drv, efx->net_dev, 112 + "unable to read datapath firmware capabilities\n"); 113 + return -EIO; 114 + } 110 115 111 - if (outlen >= sizeof(outbuf)) { 112 - nic_data->datapath_caps = 113 - MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 114 - if (!(nic_data->datapath_caps & 115 - (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { 116 - netif_err(efx, drv, efx->net_dev, 117 - "Capabilities don't indicate TSO support.\n"); 118 - return -ENODEV; 119 - } 116 + nic_data->datapath_caps = 117 + MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 118 + 119 + if (!(nic_data->datapath_caps & 120 + (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { 121 + netif_err(efx, drv, efx->net_dev, 122 + "current firmware does not support TSO\n"); 123 + return -ENODEV; 124 + } 125 + 126 + if (!(nic_data->datapath_caps & 127 + (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { 128 + netif_err(efx, probe, efx->net_dev, 129 + "current firmware does not support an RX prefix\n"); 130 + return -ENODEV; 120 131 } 121 132 122 133 return 0; ··· 228 217 if (rc) 229 218 goto fail3; 230 219 231 - rc = efx_ef10_init_capabilities(efx); 220 + rc = efx_ef10_init_datapath_caps(efx); 232 221 if (rc < 0) 233 222 goto fail3; 234 223 235 224 efx->rx_packet_len_offset = 236 225 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; 237 - 238 - if (!(nic_data->datapath_caps & 239 - (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { 240 - netif_err(efx, probe, efx->net_dev, 241 - "current firmware does not support an RX 
prefix\n"); 242 - rc = -ENODEV; 243 - goto fail3; 244 - } 245 226 246 227 rc = efx_mcdi_port_get_number(efx); 247 228 if (rc < 0) ··· 262 259 rc = efx_mcdi_mon_probe(efx); 263 260 if (rc) 264 261 goto fail3; 265 - 266 - efx_ptp_probe(efx); 267 262 268 263 return 0; 269 264 ··· 342 341 { 343 342 struct efx_ef10_nic_data *nic_data = efx->nic_data; 344 343 int rc; 344 + 345 + if (nic_data->must_check_datapath_caps) { 346 + rc = efx_ef10_init_datapath_caps(efx); 347 + if (rc) 348 + return rc; 349 + nic_data->must_check_datapath_caps = false; 350 + } 345 351 346 352 if (nic_data->must_realloc_vis) { 347 353 /* We cannot let the number of VIs change now */ ··· 717 709 nic_data->must_realloc_vis = true; 718 710 nic_data->must_restore_filters = true; 719 711 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 712 + 713 + /* The datapath firmware might have been changed */ 714 + nic_data->must_check_datapath_caps = true; 715 + 716 + /* MAC statistics have been cleared on the NIC; clear the local 717 + * statistic that we update with efx_update_diff_stat(). 718 + */ 719 + nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; 720 720 721 721 return -EIO; 722 722 }
+2
drivers/net/ethernet/sfc/mcdi_port.c
··· 556 556 case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; 557 557 case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; 558 558 case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; 559 + case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; 559 560 default: return -EINVAL; 560 561 } 561 562 } else { ··· 842 841 [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, 843 842 [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, 844 843 [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, 844 + [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000, 845 845 }; 846 846 847 847 void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+3
drivers/net/ethernet/sfc/nic.h
··· 400 400 * @rx_rss_context: Firmware handle for our RSS context 401 401 * @stats: Hardware statistics 402 402 * @workaround_35388: Flag: firmware supports workaround for bug 35388 403 + * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated 404 + * after MC reboot 403 405 * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of 404 406 * %MC_CMD_GET_CAPABILITIES response) 405 407 */ ··· 415 413 u32 rx_rss_context; 416 414 u64 stats[EF10_STAT_COUNT]; 417 415 bool workaround_35388; 416 + bool must_check_datapath_caps; 418 417 u32 datapath_caps; 419 418 }; 420 419
+1 -1
drivers/net/ethernet/smsc/smc91x.h
··· 271 271 #define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) 272 272 #define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) 273 273 274 - #define SMC_IRQ_FLAGS (IRQF_DISABLED) 274 + #define SMC_IRQ_FLAGS 0 275 275 276 276 #else 277 277
+1 -2
drivers/net/ethernet/smsc/smsc9420.c
··· 1356 1356 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); 1357 1357 smsc9420_pci_flush_write(pd); 1358 1358 1359 - result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, 1360 - DRV_NAME, pd); 1359 + result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd); 1361 1360 if (result) { 1362 1361 smsc_warn(IFUP, "Unable to use IRQ = %d", irq); 1363 1362 result = -ENODEV;
+1 -1
drivers/net/ethernet/toshiba/ps3_gelic_net.c
··· 1726 1726 goto fail_alloc_irq; 1727 1727 } 1728 1728 result = request_irq(card->irq, gelic_card_interrupt, 1729 - IRQF_DISABLED, netdev->name, card); 1729 + 0, netdev->name, card); 1730 1730 1731 1731 if (result) { 1732 1732 dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
+23 -17
drivers/net/irda/mcs7780.c
··· 191 191 goto error; 192 192 193 193 ret = 0; 194 - error: 195 - return ret; 194 + error: 195 + return ret; 196 196 } 197 197 198 198 /* Setup a communication between mcs7780 and agilent chip. */ ··· 501 501 return 0; 502 502 503 503 mcs->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 504 - if (!mcs->rx_urb) 504 + if (!mcs->rx_urb) { 505 + usb_free_urb(mcs->tx_urb); 506 + mcs->tx_urb = NULL; 505 507 return 0; 508 + } 506 509 507 510 return 1; 508 511 } ··· 646 643 ret = mcs_set_reg(mcs, MCS_MODE_REG, rval); 647 644 648 645 mcs->speed = mcs->new_speed; 649 - error: 650 - mcs->new_speed = 0; 651 - return ret; 646 + error: 647 + mcs->new_speed = 0; 648 + return ret; 652 649 } 653 650 654 651 /* Ioctl calls not supported at this time. Can be an area of future work. */ ··· 741 738 742 739 ret = mcs_receive_start(mcs); 743 740 if (ret) 744 - goto error3; 741 + goto error4; 745 742 746 743 netif_start_queue(netdev); 747 744 return 0; 748 745 749 - error3: 750 - irlap_close(mcs->irlap); 751 - error2: 752 - kfree_skb(mcs->rx_buff.skb); 753 - error1: 754 - return ret; 746 + error4: 747 + usb_free_urb(mcs->rx_urb); 748 + usb_free_urb(mcs->tx_urb); 749 + error3: 750 + irlap_close(mcs->irlap); 751 + error2: 752 + kfree_skb(mcs->rx_buff.skb); 753 + error1: 754 + return ret; 755 755 } 756 756 757 757 /* Receive callback function. */ ··· 952 946 usb_set_intfdata(intf, mcs); 953 947 return 0; 954 948 955 - error2: 956 - free_netdev(ndev); 949 + error2: 950 + free_netdev(ndev); 957 951 958 - error1: 959 - return ret; 952 + error1: 953 + return ret; 960 954 } 961 955 962 956 /* The current device is removed, the USB layer tells us to shut down. */
+1
drivers/net/loopback.c
··· 146 146 147 147 static void loopback_dev_free(struct net_device *dev) 148 148 { 149 + dev_net(dev)->loopback_dev = NULL; 149 150 free_percpu(dev->lstats); 150 151 free_netdev(dev); 151 152 }
+1 -4
drivers/net/netconsole.c
··· 684 684 case NETDEV_RELEASE: 685 685 case NETDEV_JOIN: 686 686 case NETDEV_UNREGISTER: 687 - /* 688 - * rtnl_lock already held 687 + /* rtnl_lock already held 689 688 * we might sleep in __netpoll_cleanup() 690 689 */ 691 690 spin_unlock_irqrestore(&target_list_lock, flags); 692 691 693 - mutex_lock(&nt->mutex); 694 692 __netpoll_cleanup(&nt->np); 695 - mutex_unlock(&nt->mutex); 696 693 697 694 spin_lock_irqsave(&target_list_lock, flags); 698 695 dev_put(nt->np.dev);
+2 -2
drivers/net/phy/cicada.c
··· 30 30 #include <linux/ethtool.h> 31 31 #include <linux/phy.h> 32 32 33 - #include <asm/io.h> 33 + #include <linux/io.h> 34 34 #include <asm/irq.h> 35 - #include <asm/uaccess.h> 35 + #include <linux/uaccess.h> 36 36 37 37 /* Cicada Extended Control Register 1 */ 38 38 #define MII_CIS8201_EXT_CON1 0x17
+1 -1
drivers/net/ppp/pptp.c
··· 281 281 nf_reset(skb); 282 282 283 283 skb->ip_summed = CHECKSUM_NONE; 284 - ip_select_ident(iph, &rt->dst, NULL); 284 + ip_select_ident(skb, &rt->dst, NULL); 285 285 ip_send_check(iph); 286 286 287 287 ip_local_out(skb);
+8 -3
drivers/net/tun.c
··· 1641 1641 INIT_LIST_HEAD(&tun->disabled); 1642 1642 err = tun_attach(tun, file, false); 1643 1643 if (err < 0) 1644 - goto err_free_dev; 1644 + goto err_free_flow; 1645 1645 1646 1646 err = register_netdevice(tun->dev); 1647 1647 if (err < 0) 1648 - goto err_free_dev; 1648 + goto err_detach; 1649 1649 1650 1650 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1651 1651 device_create_file(&tun->dev->dev, &dev_attr_owner) || ··· 1689 1689 strcpy(ifr->ifr_name, tun->dev->name); 1690 1690 return 0; 1691 1691 1692 - err_free_dev: 1692 + err_detach: 1693 + tun_detach_all(dev); 1694 + err_free_flow: 1695 + tun_flow_uninit(tun); 1696 + security_tun_dev_free_security(tun->security); 1697 + err_free_dev: 1693 1698 free_netdev(dev); 1694 1699 return err; 1695 1700 }
+42 -73
drivers/net/usb/cdc_ether.c
··· 33 33 #include <linux/usb/usbnet.h> 34 34 35 35 36 - #if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE) 36 + #if IS_ENABLED(CONFIG_USB_NET_RNDIS_HOST) 37 37 38 38 static int is_rndis(struct usb_interface_descriptor *desc) 39 39 { ··· 69 69 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, 70 70 }; 71 71 72 - /* 73 - * probes control interface, claims data interface, collects the bulk 72 + /* probes control interface, claims data interface, collects the bulk 74 73 * endpoints, activates data interface (if needed), maybe sets MTU. 75 74 * all pure cdc, except for certain firmware workarounds, and knowing 76 75 * that rndis uses one different rule. ··· 87 88 struct usb_cdc_mdlm_desc *desc = NULL; 88 89 struct usb_cdc_mdlm_detail_desc *detail = NULL; 89 90 90 - if (sizeof dev->data < sizeof *info) 91 + if (sizeof(dev->data) < sizeof(*info)) 91 92 return -EDOM; 92 93 93 94 /* expect strict spec conformance for the descriptors, but ··· 125 126 is_activesync(&intf->cur_altsetting->desc) || 126 127 is_wireless_rndis(&intf->cur_altsetting->desc)); 127 128 128 - memset(info, 0, sizeof *info); 129 + memset(info, 0, sizeof(*info)); 129 130 info->control = intf; 130 131 while (len > 3) { 131 - if (buf [1] != USB_DT_CS_INTERFACE) 132 + if (buf[1] != USB_DT_CS_INTERFACE) 132 133 goto next_desc; 133 134 134 135 /* use bDescriptorSubType to identify the CDC descriptors. ··· 138 139 * in favor of a complicated OID-based RPC scheme doing what 139 140 * CDC Ethernet achieves with a simple descriptor. 
140 141 */ 141 - switch (buf [2]) { 142 + switch (buf[2]) { 142 143 case USB_CDC_HEADER_TYPE: 143 144 if (info->header) { 144 145 dev_dbg(&intf->dev, "extra CDC header\n"); 145 146 goto bad_desc; 146 147 } 147 148 info->header = (void *) buf; 148 - if (info->header->bLength != sizeof *info->header) { 149 + if (info->header->bLength != sizeof(*info->header)) { 149 150 dev_dbg(&intf->dev, "CDC header len %u\n", 150 151 info->header->bLength); 151 152 goto bad_desc; ··· 174 175 goto bad_desc; 175 176 } 176 177 info->u = (void *) buf; 177 - if (info->u->bLength != sizeof *info->u) { 178 + if (info->u->bLength != sizeof(*info->u)) { 178 179 dev_dbg(&intf->dev, "CDC union len %u\n", 179 180 info->u->bLength); 180 181 goto bad_desc; ··· 232 233 goto bad_desc; 233 234 } 234 235 info->ether = (void *) buf; 235 - if (info->ether->bLength != sizeof *info->ether) { 236 + if (info->ether->bLength != sizeof(*info->ether)) { 236 237 dev_dbg(&intf->dev, "CDC ether len %u\n", 237 238 info->ether->bLength); 238 239 goto bad_desc; ··· 273 274 break; 274 275 } 275 276 next_desc: 276 - len -= buf [0]; /* bLength */ 277 - buf += buf [0]; 277 + len -= buf[0]; /* bLength */ 278 + buf += buf[0]; 278 279 } 279 280 280 281 /* Microsoft ActiveSync based and some regular RNDIS devices lack the ··· 378 379 } 379 380 EXPORT_SYMBOL_GPL(usbnet_cdc_unbind); 380 381 381 - /*------------------------------------------------------------------------- 382 - * 383 - * Communications Device Class, Ethernet Control model 382 + /* Communications Device Class, Ethernet Control model 384 383 * 385 384 * Takes two interfaces. The DATA interface is inactive till an altsetting 386 385 * is selected. Configuration data includes class descriptors. There's ··· 386 389 * 387 390 * This should interop with whatever the 2.4 "CDCEther.c" driver 388 391 * (by Brad Hards) talked with, with more functionality. 
389 - * 390 - *-------------------------------------------------------------------------*/ 392 + */ 391 393 392 394 static void dumpspeed(struct usbnet *dev, __le32 *speeds) 393 395 { ··· 400 404 { 401 405 struct usb_cdc_notification *event; 402 406 403 - if (urb->actual_length < sizeof *event) 407 + if (urb->actual_length < sizeof(*event)) 404 408 return; 405 409 406 410 /* SPEED_CHANGE can get split into two 8-byte packets */ ··· 419 423 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ 420 424 netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n", 421 425 urb->actual_length); 422 - if (urb->actual_length != (sizeof *event + 8)) 426 + if (urb->actual_length != (sizeof(*event) + 8)) 423 427 set_bit(EVENT_STS_SPLIT, &dev->flags); 424 428 else 425 429 dumpspeed(dev, (__le32 *) &event[1]); ··· 465 469 static const struct driver_info cdc_info = { 466 470 .description = "CDC Ethernet Device", 467 471 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, 468 - // .check_connect = cdc_check_connect, 469 472 .bind = usbnet_cdc_bind, 470 473 .unbind = usbnet_cdc_unbind, 471 474 .status = usbnet_cdc_status, ··· 488 493 #define DELL_VENDOR_ID 0x413C 489 494 #define REALTEK_VENDOR_ID 0x0bda 490 495 491 - static const struct usb_device_id products [] = { 492 - /* 493 - * BLACKLIST !! 496 + static const struct usb_device_id products[] = { 497 + /* BLACKLIST !! 494 498 * 495 499 * First blacklist any products that are egregiously nonconformant 496 500 * with the CDC Ethernet specs. Minor braindamage we cope with; when ··· 536 542 .driver_info = 0, 537 543 }, { 538 544 .match_flags = USB_DEVICE_ID_MATCH_INT_INFO 539 - | USB_DEVICE_ID_MATCH_DEVICE, 545 + | USB_DEVICE_ID_MATCH_DEVICE, 540 546 .idVendor = 0x04DD, 541 547 .idProduct = 0x8007, /* C-700 */ 542 548 ZAURUS_MASTER_INTERFACE, ··· 653 659 .driver_info = 0, 654 660 }, 655 661 656 - /* 657 - * WHITELIST!!! 662 + /* WHITELIST!!! 658 663 * 659 664 * CDC Ether uses two interfaces, not necessarily consecutive. 
660 665 * We match the main interface, ignoring the optional device ··· 665 672 */ 666 673 { 667 674 /* ZTE (Vodafone) K3805-Z */ 668 - .match_flags = USB_DEVICE_ID_MATCH_VENDOR 669 - | USB_DEVICE_ID_MATCH_PRODUCT 670 - | USB_DEVICE_ID_MATCH_INT_INFO, 671 - .idVendor = ZTE_VENDOR_ID, 672 - .idProduct = 0x1003, 673 - .bInterfaceClass = USB_CLASS_COMM, 674 - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 675 - .bInterfaceProtocol = USB_CDC_PROTO_NONE, 675 + USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1003, USB_CLASS_COMM, 676 + USB_CDC_SUBCLASS_ETHERNET, 677 + USB_CDC_PROTO_NONE), 676 678 .driver_info = (unsigned long)&wwan_info, 677 679 }, { 678 680 /* ZTE (Vodafone) K3806-Z */ 679 - .match_flags = USB_DEVICE_ID_MATCH_VENDOR 680 - | USB_DEVICE_ID_MATCH_PRODUCT 681 - | USB_DEVICE_ID_MATCH_INT_INFO, 682 - .idVendor = ZTE_VENDOR_ID, 683 - .idProduct = 0x1015, 684 - .bInterfaceClass = USB_CLASS_COMM, 685 - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 686 - .bInterfaceProtocol = USB_CDC_PROTO_NONE, 681 + USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1015, USB_CLASS_COMM, 682 + USB_CDC_SUBCLASS_ETHERNET, 683 + USB_CDC_PROTO_NONE), 687 684 .driver_info = (unsigned long)&wwan_info, 688 685 }, { 689 686 /* ZTE (Vodafone) K4510-Z */ 690 - .match_flags = USB_DEVICE_ID_MATCH_VENDOR 691 - | USB_DEVICE_ID_MATCH_PRODUCT 692 - | USB_DEVICE_ID_MATCH_INT_INFO, 693 - .idVendor = ZTE_VENDOR_ID, 694 - .idProduct = 0x1173, 695 - .bInterfaceClass = USB_CLASS_COMM, 696 - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 697 - .bInterfaceProtocol = USB_CDC_PROTO_NONE, 687 + USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1173, USB_CLASS_COMM, 688 + USB_CDC_SUBCLASS_ETHERNET, 689 + USB_CDC_PROTO_NONE), 698 690 .driver_info = (unsigned long)&wwan_info, 699 691 }, { 700 692 /* ZTE (Vodafone) K3770-Z */ 701 - .match_flags = USB_DEVICE_ID_MATCH_VENDOR 702 - | USB_DEVICE_ID_MATCH_PRODUCT 703 - | USB_DEVICE_ID_MATCH_INT_INFO, 704 - .idVendor = ZTE_VENDOR_ID, 705 - .idProduct = 0x1177, 706 
- .bInterfaceClass = USB_CLASS_COMM, 707 - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 708 - .bInterfaceProtocol = USB_CDC_PROTO_NONE, 693 + USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1177, USB_CLASS_COMM, 694 + USB_CDC_SUBCLASS_ETHERNET, 695 + USB_CDC_PROTO_NONE), 709 696 .driver_info = (unsigned long)&wwan_info, 710 697 }, { 711 698 /* ZTE (Vodafone) K3772-Z */ 712 - .match_flags = USB_DEVICE_ID_MATCH_VENDOR 713 - | USB_DEVICE_ID_MATCH_PRODUCT 714 - | USB_DEVICE_ID_MATCH_INT_INFO, 715 - .idVendor = ZTE_VENDOR_ID, 716 - .idProduct = 0x1181, 717 - .bInterfaceClass = USB_CLASS_COMM, 718 - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 719 - .bInterfaceProtocol = USB_CDC_PROTO_NONE, 699 + USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1181, USB_CLASS_COMM, 700 + USB_CDC_SUBCLASS_ETHERNET, 701 + USB_CDC_PROTO_NONE), 720 702 .driver_info = (unsigned long)&wwan_info, 703 + }, { 704 + /* Telit modules */ 705 + USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM, 706 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 707 + .driver_info = (kernel_ulong_t) &wwan_info, 721 708 }, { 722 709 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 723 710 USB_CDC_PROTO_NONE), ··· 709 736 710 737 }, { 711 738 /* Various Huawei modems with a network port like the UMG1831 */ 712 - .match_flags = USB_DEVICE_ID_MATCH_VENDOR 713 - | USB_DEVICE_ID_MATCH_INT_INFO, 714 - .idVendor = HUAWEI_VENDOR_ID, 715 - .bInterfaceClass = USB_CLASS_COMM, 716 - .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, 717 - .bInterfaceProtocol = 255, 739 + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_COMM, 740 + USB_CDC_SUBCLASS_ETHERNET, 255), 718 741 .driver_info = (unsigned long)&wwan_info, 719 742 }, 720 - { }, // END 743 + { }, /* END */ 721 744 }; 722 745 MODULE_DEVICE_TABLE(usb, products); 723 746
+21 -19
drivers/net/vxlan.c
··· 564 564 struct net_device *dev; 565 565 struct net *net = sock_net(sk); 566 566 sa_family_t sa_family = sk->sk_family; 567 - u16 port = htons(inet_sk(sk)->inet_sport); 567 + __be16 port = inet_sk(sk)->inet_sport; 568 568 569 569 rcu_read_lock(); 570 570 for_each_netdev_rcu(net, dev) { ··· 581 581 struct net_device *dev; 582 582 struct net *net = sock_net(sk); 583 583 sa_family_t sa_family = sk->sk_family; 584 - u16 port = htons(inet_sk(sk)->inet_sport); 584 + __be16 port = inet_sk(sk)->inet_sport; 585 585 586 586 rcu_read_lock(); 587 587 for_each_netdev_rcu(net, dev) { ··· 2021 2021 }; 2022 2022 2023 2023 /* Calls the ndo_add_vxlan_port of the caller in order to 2024 - * supply the listening VXLAN udp ports. 2024 + * supply the listening VXLAN udp ports. Callers are expected 2025 + * to implement the ndo_add_vxlan_port. 2025 2026 */ 2026 2027 void vxlan_get_rx_port(struct net_device *dev) 2027 2028 { ··· 2030 2029 struct net *net = dev_net(dev); 2031 2030 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2032 2031 sa_family_t sa_family; 2033 - u16 port; 2034 - int i; 2035 - 2036 - if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port) 2037 - return; 2032 + __be16 port; 2033 + unsigned int i; 2038 2034 2039 2035 spin_lock(&vn->sock_lock); 2040 2036 for (i = 0; i < PORT_HASH_SIZE; ++i) { 2041 - hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) { 2042 - port = htons(inet_sk(vs->sock->sk)->inet_sport); 2037 + hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { 2038 + port = inet_sk(vs->sock->sk)->inet_sport; 2043 2039 sa_family = vs->sock->sk->sk_family; 2044 2040 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, 2045 2041 port); ··· 2490 2492 2491 2493 SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops); 2492 2494 2493 - /* create an fdb entry for default destination */ 2494 - err = vxlan_fdb_create(vxlan, all_zeros_mac, 2495 - &vxlan->default_dst.remote_ip, 2496 - NUD_REACHABLE|NUD_PERMANENT, 2497 - NLM_F_EXCL|NLM_F_CREATE, 2498 - 
vxlan->dst_port, vxlan->default_dst.remote_vni, 2499 - vxlan->default_dst.remote_ifindex, NTF_SELF); 2500 - if (err) 2501 - return err; 2495 + /* create an fdb entry for a valid default destination */ 2496 + if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { 2497 + err = vxlan_fdb_create(vxlan, all_zeros_mac, 2498 + &vxlan->default_dst.remote_ip, 2499 + NUD_REACHABLE|NUD_PERMANENT, 2500 + NLM_F_EXCL|NLM_F_CREATE, 2501 + vxlan->dst_port, 2502 + vxlan->default_dst.remote_vni, 2503 + vxlan->default_dst.remote_ifindex, 2504 + NTF_SELF); 2505 + if (err) 2506 + return err; 2507 + } 2502 2508 2503 2509 err = register_netdevice(dev); 2504 2510 if (err) {
+2 -2
drivers/net/wireless/brcm80211/Kconfig
··· 28 28 29 29 config BRCMFMAC_SDIO 30 30 bool "SDIO bus interface support for FullMAC driver" 31 - depends on MMC 31 + depends on (MMC = y || MMC = BRCMFMAC) 32 32 depends on BRCMFMAC 33 33 select FW_LOADER 34 34 default y ··· 39 39 40 40 config BRCMFMAC_USB 41 41 bool "USB bus interface support for FullMAC driver" 42 - depends on USB 42 + depends on (USB = y || USB = BRCMFMAC) 43 43 depends on BRCMFMAC 44 44 select FW_LOADER 45 45 ---help---
+25 -3
drivers/net/wireless/cw1200/cw1200_spi.c
··· 40 40 struct cw1200_common *core; 41 41 const struct cw1200_platform_data_spi *pdata; 42 42 spinlock_t lock; /* Serialize all bus operations */ 43 + wait_queue_head_t wq; 43 44 int claimed; 45 + int irq_disabled; 44 46 }; 45 47 46 48 #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) ··· 199 197 { 200 198 unsigned long flags; 201 199 200 + DECLARE_WAITQUEUE(wait, current); 201 + 202 202 might_sleep(); 203 203 204 + add_wait_queue(&self->wq, &wait); 204 205 spin_lock_irqsave(&self->lock, flags); 205 206 while (1) { 206 207 set_current_state(TASK_UNINTERRUPTIBLE); ··· 216 211 set_current_state(TASK_RUNNING); 217 212 self->claimed = 1; 218 213 spin_unlock_irqrestore(&self->lock, flags); 214 + remove_wait_queue(&self->wq, &wait); 219 215 220 216 return; 221 217 } ··· 228 222 spin_lock_irqsave(&self->lock, flags); 229 223 self->claimed = 0; 230 224 spin_unlock_irqrestore(&self->lock, flags); 225 + wake_up(&self->wq); 226 + 231 227 return; 232 228 } 233 229 ··· 238 230 struct hwbus_priv *self = dev_id; 239 231 240 232 if (self->core) { 233 + disable_irq_nosync(self->func->irq); 234 + self->irq_disabled = 1; 241 235 cw1200_irq_handler(self->core); 242 236 return IRQ_HANDLED; 243 237 } else { ··· 273 263 274 264 static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) 275 265 { 276 - int ret = 0; 277 - 278 266 pr_debug("SW IRQ unsubscribe\n"); 279 267 disable_irq_wake(self->func->irq); 280 268 free_irq(self->func->irq, self); 281 269 282 - return ret; 270 + return 0; 271 + } 272 + 273 + static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable) 274 + { 275 + /* Disables are handled by the interrupt handler */ 276 + if (enable && self->irq_disabled) { 277 + enable_irq(self->func->irq); 278 + self->irq_disabled = 0; 279 + } 280 + 281 + return 0; 283 282 } 284 283 285 284 static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) ··· 368 349 .unlock = cw1200_spi_unlock, 369 350 .align_size = cw1200_spi_align_size, 370 351 .power_mgmt = 
cw1200_spi_pm, 352 + .irq_enable = cw1200_spi_irq_enable, 371 353 }; 372 354 373 355 /* Probe Function to be called by SPI stack when device is discovered */ ··· 419 399 spin_lock_init(&self->lock); 420 400 421 401 spi_set_drvdata(func, self); 402 + 403 + init_waitqueue_head(&self->wq); 422 404 423 405 status = cw1200_spi_irq_subscribe(self); 424 406
+1 -1
drivers/net/wireless/cw1200/fwio.c
··· 485 485 486 486 /* Enable interrupt signalling */ 487 487 priv->hwbus_ops->lock(priv->hwbus_priv); 488 - ret = __cw1200_irq_enable(priv, 1); 488 + ret = __cw1200_irq_enable(priv, 2); 489 489 priv->hwbus_ops->unlock(priv->hwbus_priv); 490 490 if (ret < 0) 491 491 goto unsubscribe;
+1
drivers/net/wireless/cw1200/hwbus.h
··· 28 28 void (*unlock)(struct hwbus_priv *self); 29 29 size_t (*align_size)(struct hwbus_priv *self, size_t size); 30 30 int (*power_mgmt)(struct hwbus_priv *self, bool suspend); 31 + int (*irq_enable)(struct hwbus_priv *self, int enable); 31 32 }; 32 33 33 34 #endif /* CW1200_HWBUS_H */
+15
drivers/net/wireless/cw1200/hwio.c
··· 273 273 u16 val16; 274 274 int ret; 275 275 276 + /* We need to do this hack because the SPI layer can sleep on I/O 277 + and the general path involves I/O to the device in interrupt 278 + context. 279 + 280 + However, the initial enable call needs to go to the hardware. 281 + 282 + We don't worry about shutdown because we do a full reset which 283 + clears the interrupt enabled bits. 284 + */ 285 + if (priv->hwbus_ops->irq_enable) { 286 + ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable); 287 + if (ret || enable < 2) 288 + return ret; 289 + } 290 + 276 291 if (HIF_8601_SILICON == priv->hw_type) { 277 292 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); 278 293 if (ret < 0) {
+6 -5
drivers/net/wireless/rt2x00/rt2800lib.c
··· 6659 6659 rt2800_init_registers(rt2x00dev))) 6660 6660 return -EIO; 6661 6661 6662 + if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev))) 6663 + return -EIO; 6664 + 6662 6665 /* 6663 6666 * Send signal to firmware during boot time. 6664 6667 */ 6665 6668 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 6666 6669 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 6667 - if (rt2x00_is_usb(rt2x00dev)) { 6670 + if (rt2x00_is_usb(rt2x00dev)) 6668 6671 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0); 6669 - rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); 6670 - } 6672 + rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); 6671 6673 msleep(1); 6672 6674 6673 - if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || 6674 - rt2800_wait_bbp_ready(rt2x00dev))) 6675 + if (unlikely(rt2800_wait_bbp_ready(rt2x00dev))) 6675 6676 return -EIO; 6676 6677 6677 6678 rt2800_init_bbp(rt2x00dev);
+10 -5
drivers/net/wireless/rtl818x/rtl8187/dev.c
··· 438 438 skb_queue_tail(&priv->rx_queue, skb); 439 439 usb_anchor_urb(entry, &priv->anchored); 440 440 ret = usb_submit_urb(entry, GFP_KERNEL); 441 + usb_put_urb(entry); 441 442 if (ret) { 442 443 skb_unlink(skb, &priv->rx_queue); 443 444 usb_unanchor_urb(entry); 444 445 goto err; 445 446 } 446 - usb_free_urb(entry); 447 447 } 448 448 return ret; 449 449 450 450 err: 451 - usb_free_urb(entry); 452 451 kfree_skb(skb); 453 452 usb_kill_anchored_urbs(&priv->anchored); 454 453 return ret; ··· 955 956 (RETRY_COUNT << 8 /* short retry limit */) | 956 957 (RETRY_COUNT << 0 /* long retry limit */) | 957 958 (7 << 21 /* MAX TX DMA */)); 958 - rtl8187_init_urbs(dev); 959 - rtl8187b_init_status_urb(dev); 959 + ret = rtl8187_init_urbs(dev); 960 + if (ret) 961 + goto rtl8187_start_exit; 962 + ret = rtl8187b_init_status_urb(dev); 963 + if (ret) 964 + usb_kill_anchored_urbs(&priv->anchored); 960 965 goto rtl8187_start_exit; 961 966 } 962 967 ··· 969 966 rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); 970 967 rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); 971 968 972 - rtl8187_init_urbs(dev); 969 + ret = rtl8187_init_urbs(dev); 970 + if (ret) 971 + goto rtl8187_start_exit; 973 972 974 973 reg = RTL818X_RX_CONF_ONLYERLPKT | 975 974 RTL818X_RX_CONF_RX_AUTORESETPHY |
+1
drivers/net/xen-netback/common.h
··· 184 184 unsigned long rx_ring_ref, unsigned int tx_evtchn, 185 185 unsigned int rx_evtchn); 186 186 void xenvif_disconnect(struct xenvif *vif); 187 + void xenvif_free(struct xenvif *vif); 187 188 188 189 int xenvif_xenbus_init(void); 189 190 void xenvif_xenbus_fini(void);
+11 -17
drivers/net/xen-netback/interface.c
··· 353 353 } 354 354 355 355 netdev_dbg(dev, "Successfully created xenvif\n"); 356 + 357 + __module_get(THIS_MODULE); 358 + 356 359 return vif; 357 360 } 358 361 ··· 368 365 /* Already connected through? */ 369 366 if (vif->tx_irq) 370 367 return 0; 371 - 372 - __module_get(THIS_MODULE); 373 368 374 369 err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); 375 370 if (err < 0) ··· 407 406 408 407 init_waitqueue_head(&vif->wq); 409 408 vif->task = kthread_create(xenvif_kthread, 410 - (void *)vif, vif->dev->name); 409 + (void *)vif, "%s", vif->dev->name); 411 410 if (IS_ERR(vif->task)) { 412 411 pr_warn("Could not allocate kthread for %s\n", vif->dev->name); 413 412 err = PTR_ERR(vif->task); ··· 453 452 454 453 void xenvif_disconnect(struct xenvif *vif) 455 454 { 456 - /* Disconnect funtion might get called by generic framework 457 - * even before vif connects, so we need to check if we really 458 - * need to do a module_put. 459 - */ 460 - int need_module_put = 0; 461 - 462 455 if (netif_carrier_ok(vif->dev)) 463 456 xenvif_carrier_off(vif); 464 457 ··· 463 468 unbind_from_irqhandler(vif->tx_irq, vif); 464 469 unbind_from_irqhandler(vif->rx_irq, vif); 465 470 } 466 - /* vif->irq is valid, we had a module_get in 467 - * xenvif_connect. 468 - */ 469 - need_module_put = 1; 471 + vif->tx_irq = 0; 470 472 } 471 473 472 474 if (vif->task) 473 475 kthread_stop(vif->task); 474 476 477 + xenvif_unmap_frontend_rings(vif); 478 + } 479 + 480 + void xenvif_free(struct xenvif *vif) 481 + { 475 482 netif_napi_del(&vif->napi); 476 483 477 484 unregister_netdev(vif->dev); 478 485 479 - xenvif_unmap_frontend_rings(vif); 480 - 481 486 free_netdev(vif->dev); 482 487 483 - if (need_module_put) 484 - module_put(THIS_MODULE); 488 + module_put(THIS_MODULE); 485 489 }
+65 -31
drivers/net/xen-netback/netback.c
··· 212 212 return false; 213 213 } 214 214 215 + struct xenvif_count_slot_state { 216 + unsigned long copy_off; 217 + bool head; 218 + }; 219 + 220 + unsigned int xenvif_count_frag_slots(struct xenvif *vif, 221 + unsigned long offset, unsigned long size, 222 + struct xenvif_count_slot_state *state) 223 + { 224 + unsigned count = 0; 225 + 226 + offset &= ~PAGE_MASK; 227 + 228 + while (size > 0) { 229 + unsigned long bytes; 230 + 231 + bytes = PAGE_SIZE - offset; 232 + 233 + if (bytes > size) 234 + bytes = size; 235 + 236 + if (start_new_rx_buffer(state->copy_off, bytes, state->head)) { 237 + count++; 238 + state->copy_off = 0; 239 + } 240 + 241 + if (state->copy_off + bytes > MAX_BUFFER_OFFSET) 242 + bytes = MAX_BUFFER_OFFSET - state->copy_off; 243 + 244 + state->copy_off += bytes; 245 + 246 + offset += bytes; 247 + size -= bytes; 248 + 249 + if (offset == PAGE_SIZE) 250 + offset = 0; 251 + 252 + state->head = false; 253 + } 254 + 255 + return count; 256 + } 257 + 215 258 /* 216 259 * Figure out how many ring slots we're going to need to send @skb to 217 260 * the guest. This function is essentially a dry run of ··· 262 219 */ 263 220 unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) 264 221 { 222 + struct xenvif_count_slot_state state; 265 223 unsigned int count; 266 - int i, copy_off; 224 + unsigned char *data; 225 + unsigned i; 267 226 268 - count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); 227 + state.head = true; 228 + state.copy_off = 0; 269 229 270 - copy_off = skb_headlen(skb) % PAGE_SIZE; 230 + /* Slot for the first (partial) page of data. */ 231 + count = 1; 271 232 233 + /* Need a slot for the GSO prefix for GSO extra data? 
*/ 272 234 if (skb_shinfo(skb)->gso_size) 273 235 count++; 236 + 237 + data = skb->data; 238 + while (data < skb_tail_pointer(skb)) { 239 + unsigned long offset = offset_in_page(data); 240 + unsigned long size = PAGE_SIZE - offset; 241 + 242 + if (data + size > skb_tail_pointer(skb)) 243 + size = skb_tail_pointer(skb) - data; 244 + 245 + count += xenvif_count_frag_slots(vif, offset, size, &state); 246 + 247 + data += size; 248 + } 274 249 275 250 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 276 251 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 277 252 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; 278 - unsigned long bytes; 279 253 280 - offset &= ~PAGE_MASK; 281 - 282 - while (size > 0) { 283 - BUG_ON(offset >= PAGE_SIZE); 284 - BUG_ON(copy_off > MAX_BUFFER_OFFSET); 285 - 286 - bytes = PAGE_SIZE - offset; 287 - 288 - if (bytes > size) 289 - bytes = size; 290 - 291 - if (start_new_rx_buffer(copy_off, bytes, 0)) { 292 - count++; 293 - copy_off = 0; 294 - } 295 - 296 - if (copy_off + bytes > MAX_BUFFER_OFFSET) 297 - bytes = MAX_BUFFER_OFFSET - copy_off; 298 - 299 - copy_off += bytes; 300 - 301 - offset += bytes; 302 - size -= bytes; 303 - 304 - if (offset == PAGE_SIZE) 305 - offset = 0; 306 - } 254 + count += xenvif_count_frag_slots(vif, offset, size, &state); 307 255 } 308 256 return count; 309 257 }
+13 -6
drivers/net/xen-netback/xenbus.c
··· 42 42 if (be->vif) { 43 43 kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 44 44 xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 45 - xenvif_disconnect(be->vif); 45 + xenvif_free(be->vif); 46 46 be->vif = NULL; 47 47 } 48 48 kfree(be); ··· 213 213 { 214 214 struct backend_info *be = dev_get_drvdata(&dev->dev); 215 215 216 - if (be->vif) { 217 - xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 216 + if (be->vif) 218 217 xenvif_disconnect(be->vif); 218 + } 219 + 220 + static void destroy_backend(struct xenbus_device *dev) 221 + { 222 + struct backend_info *be = dev_get_drvdata(&dev->dev); 223 + 224 + if (be->vif) { 225 + kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 226 + xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); 227 + xenvif_free(be->vif); 219 228 be->vif = NULL; 220 229 } 221 230 } ··· 255 246 case XenbusStateConnected: 256 247 if (dev->state == XenbusStateConnected) 257 248 break; 258 - backend_create_xenvif(be); 259 249 if (be->vif) 260 250 connect(be); 261 251 break; 262 252 263 253 case XenbusStateClosing: 264 - if (be->vif) 265 - kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); 266 254 disconnect_backend(dev); 267 255 xenbus_switch_state(dev, XenbusStateClosing); 268 256 break; ··· 268 262 xenbus_switch_state(dev, XenbusStateClosed); 269 263 if (xenbus_dev_is_online(dev)) 270 264 break; 265 + destroy_backend(dev); 271 266 /* fall through if not online */ 272 267 case XenbusStateUnknown: 273 268 device_unregister(&dev->dev);
+3 -3
drivers/pci/pci-acpi.c
··· 47 47 if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev) 48 48 return; 49 49 50 + if (pci_dev->pme_poll) 51 + pci_dev->pme_poll = false; 52 + 50 53 if (pci_dev->current_state == PCI_D3cold) { 51 54 pci_wakeup_event(pci_dev); 52 55 pm_runtime_resume(&pci_dev->dev); ··· 59 56 /* Clear PME Status if set. */ 60 57 if (pci_dev->pme_support) 61 58 pci_check_pme_status(pci_dev); 62 - 63 - if (pci_dev->pme_poll) 64 - pci_dev->pme_poll = false; 65 59 66 60 pci_wakeup_event(pci_dev); 67 61 pm_runtime_resume(&pci_dev->dev);
+1 -1
drivers/scsi/bnx2fc/bnx2fc.h
··· 105 105 #define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) 106 106 #define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) 107 107 #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) 108 - #define BNX2FC_5771X_DB_PAGE_SIZE 128 108 + #define BNX2X_DB_SHIFT 3 109 109 110 110 #define BNX2FC_TASK_SIZE 128 111 111 #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
+1 -2
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 1421 1421 1422 1422 reg_base = pci_resource_start(hba->pcidev, 1423 1423 BNX2X_DOORBELL_PCI_BAR); 1424 - reg_off = BNX2FC_5771X_DB_PAGE_SIZE * 1425 - (context_id & 0x1FFFF) + DPM_TRIGER_TYPE; 1424 + reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); 1426 1425 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); 1427 1426 if (!tgt->ctx_base) 1428 1427 return -ENOMEM;
+1 -1
drivers/scsi/bnx2i/bnx2i.h
··· 64 64 #define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 65 65 #define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 66 66 67 - #define BNX2I_5771X_DBELL_PAGE_SIZE 128 67 + #define BNX2X_DB_SHIFT 3 68 68 69 69 /* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ 70 70 #define MAX_BD_LENGTH 65535
+1 -2
drivers/scsi/bnx2i/bnx2i_hwi.c
··· 2738 2738 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 2739 2739 reg_base = pci_resource_start(ep->hba->pcidev, 2740 2740 BNX2X_DOORBELL_PCI_BAR); 2741 - reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) + 2742 - DPM_TRIGER_TYPE; 2741 + reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); 2743 2742 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); 2744 2743 goto arm_cq; 2745 2744 }
+17 -16
drivers/staging/comedi/Kconfig
··· 96 96 To compile this driver as a module, choose M here: the module will be 97 97 called skel. 98 98 99 + config COMEDI_SSV_DNP 100 + tristate "SSV Embedded Systems DIL/Net-PC support" 101 + depends on X86_32 || COMPILE_TEST 102 + ---help--- 103 + Enable support for SSV Embedded Systems DIL/Net-PC 104 + 105 + To compile this driver as a module, choose M here: the module will be 106 + called ssv_dnp. 107 + 99 108 endif # COMEDI_MISC_DRIVERS 100 109 101 110 menuconfig COMEDI_ISA_DRIVERS ··· 394 385 395 386 To compile this driver as a module, choose M here: the module will be 396 387 called dmm32at. 388 + 389 + config COMEDI_UNIOXX5 390 + tristate "Fastwel UNIOxx-5 analog and digital io board support" 391 + ---help--- 392 + Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards 393 + 394 + To compile this driver as a module, choose M here: the module will be 395 + called unioxx5. 397 396 398 397 config COMEDI_FL512 399 398 tristate "FL512 ISA card support" ··· 872 855 To compile this driver as a module, choose M here: the module will be 873 856 called dyna_pci10xx. 874 857 875 - config COMEDI_UNIOXX5 876 - tristate "Fastwel UNIOxx-5 analog and digital io board support" 877 - ---help--- 878 - Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards 879 - 880 - To compile this driver as a module, choose M here: the module will be 881 - called unioxx5. 882 - 883 858 config COMEDI_GSC_HPDI 884 859 tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support" 885 860 select COMEDI_FC ··· 1093 1084 1094 1085 To compile this driver as a module, choose M here: the module will be 1095 1086 called s626. 1096 - 1097 - config COMEDI_SSV_DNP 1098 - tristate "SSV Embedded Systems DIL/Net-PC support" 1099 - ---help--- 1100 - Enable support for SSV Embedded Systems DIL/Net-PC 1101 - 1102 - To compile this driver as a module, choose M here: the module will be 1103 - called ssv_dnp. 1104 1087 1105 1088 config COMEDI_MITE 1106 1089 depends on HAS_DMA
+10 -7
drivers/staging/dgap/dgap_driver.c
··· 470 470 471 471 DGAP_LOCK(dgap_global_lock, flags); 472 472 brd->msgbuf = NULL; 473 - printk(brd->msgbuf_head); 473 + printk("%s", brd->msgbuf_head); 474 474 kfree(brd->msgbuf_head); 475 475 brd->msgbuf_head = NULL; 476 476 DGAP_UNLOCK(dgap_global_lock, flags); ··· 624 624 DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i)); 625 625 DGAP_LOCK(dgap_global_lock, flags); 626 626 brd->msgbuf = NULL; 627 - printk(brd->msgbuf_head); 627 + printk("%s", brd->msgbuf_head); 628 628 kfree(brd->msgbuf_head); 629 629 brd->msgbuf_head = NULL; 630 630 DGAP_UNLOCK(dgap_global_lock, flags); ··· 951 951 char buf[1024]; 952 952 int i; 953 953 unsigned long flags; 954 + size_t length; 954 955 955 956 DGAP_LOCK(dgap_global_lock, flags); 956 957 957 958 /* Format buf using fmt and arguments contained in ap. */ 958 959 va_start(ap, fmt); 959 - i = vsprintf(buf, fmt, ap); 960 + i = vsnprintf(buf, sizeof(buf), fmt, ap); 960 961 va_end(ap); 961 962 962 963 DPR((buf)); 963 964 964 965 if (!brd || !brd->msgbuf) { 965 - printk(buf); 966 + printk("%s", buf); 966 967 DGAP_UNLOCK(dgap_global_lock, flags); 967 968 return; 968 969 } 969 970 970 - memcpy(brd->msgbuf, buf, strlen(buf)); 971 - brd->msgbuf += strlen(buf); 972 - *brd->msgbuf = 0; 971 + length = strlen(buf) + 1; 972 + if (brd->msgbuf - brd->msgbuf_head < length) 973 + length = brd->msgbuf - brd->msgbuf_head; 974 + memcpy(brd->msgbuf, buf, length); 975 + brd->msgbuf += length; 973 976 974 977 DGAP_UNLOCK(dgap_global_lock, flags); 975 978 }
+2 -2
drivers/staging/dgnc/dgnc_driver.c
··· 454 454 455 455 DGNC_LOCK(dgnc_global_lock, flags); 456 456 brd->msgbuf = NULL; 457 - printk(brd->msgbuf_head); 457 + printk("%s", brd->msgbuf_head); 458 458 kfree(brd->msgbuf_head); 459 459 brd->msgbuf_head = NULL; 460 460 DGNC_UNLOCK(dgnc_global_lock, flags); ··· 710 710 DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i)); 711 711 DGNC_LOCK(dgnc_global_lock, flags); 712 712 brd->msgbuf = NULL; 713 - printk(brd->msgbuf_head); 713 + printk("%s", brd->msgbuf_head); 714 714 kfree(brd->msgbuf_head); 715 715 brd->msgbuf_head = NULL; 716 716 DGNC_UNLOCK(dgnc_global_lock, flags);
+1 -1
drivers/staging/iio/Kconfig
··· 37 37 38 38 config IIO_SIMPLE_DUMMY_BUFFER 39 39 boolean "Buffered capture support" 40 - depends on IIO_KFIFO_BUF 40 + select IIO_KFIFO_BUF 41 41 help 42 42 Add buffered data capture to the simple dummy driver. 43 43
+1
drivers/staging/iio/light/isl29018.c
··· 563 563 mutex_init(&chip->lock); 564 564 565 565 chip->lux_scale = 1; 566 + chip->lux_uscale = 0; 566 567 chip->range = 1000; 567 568 chip->adc_bit = 16; 568 569 chip->suspended = false;
+1 -1
drivers/staging/iio/magnetometer/hmc5843.c
··· 229 229 if (result < 0) 230 230 return -EINVAL; 231 231 232 - *val = result; 232 + *val = sign_extend32(result, 15); 233 233 return IIO_VAL_INT; 234 234 } 235 235
+1 -1
drivers/staging/iio/meter/ade7854-spi.c
··· 297 297 298 298 ret = ade7854_probe(indio_dev, &spi->dev); 299 299 300 - return 0; 300 + return ret; 301 301 } 302 302 303 303 static int ade7854_spi_remove(struct spi_device *spi)
+7 -3
drivers/staging/line6/toneport.c
··· 244 244 struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); 245 245 struct usb_line6_toneport *toneport = 246 246 (struct usb_line6_toneport *)line6pcm->line6; 247 + unsigned int source; 247 248 248 - if (ucontrol->value.enumerated.item[0] == toneport->source) 249 + source = ucontrol->value.enumerated.item[0]; 250 + if (source >= ARRAY_SIZE(toneport_source_info)) 251 + return -EINVAL; 252 + if (source == toneport->source) 249 253 return 0; 250 254 251 - toneport->source = ucontrol->value.enumerated.item[0]; 255 + toneport->source = source; 252 256 toneport_send_cmd(toneport->line6.usbdev, 253 - toneport_source_info[toneport->source].code, 0x0000); 257 + toneport_source_info[source].code, 0x0000); 254 258 return 1; 255 259 } 256 260
+1 -1
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
··· 1802 1802 int 1803 1803 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) 1804 1804 { 1805 - struct task_struct *task = kthread_run(fn, arg, name); 1805 + struct task_struct *task = kthread_run(fn, arg, "%s", name); 1806 1806 1807 1807 if (IS_ERR(task)) 1808 1808 return PTR_ERR(task);
+1 -1
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
··· 1005 1005 int 1006 1006 ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name) 1007 1007 { 1008 - struct task_struct *task = kthread_run(fn, arg, name); 1008 + struct task_struct *task = kthread_run(fn, arg, "%s", name); 1009 1009 1010 1010 if (IS_ERR(task)) 1011 1011 return PTR_ERR(task);
+2 -2
drivers/staging/lustre/lustre/Kconfig
··· 1 1 config LUSTRE_FS 2 2 tristate "Lustre file system client support" 3 - depends on INET && m 3 + depends on INET && m && !MIPS && !XTENSA && !SUPERH 4 4 select LNET 5 5 select CRYPTO 6 6 select CRYPTO_CRC32 ··· 52 52 config LUSTRE_TRANSLATE_ERRNOS 53 53 bool 54 54 depends on LUSTRE_FS && !X86 55 - default true 55 + default y 56 56 57 57 config LUSTRE_LLITE_LLOOP 58 58 bool "Lustre virtual block device"
+2 -2
drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
··· 800 800 801 801 init_completion(&bltd.bltd_comp); 802 802 bltd.bltd_num = atomic_read(&blp->blp_num_threads); 803 - snprintf(bltd.bltd_name, sizeof(bltd.bltd_name) - 1, 803 + snprintf(bltd.bltd_name, sizeof(bltd.bltd_name), 804 804 "ldlm_bl_%02d", bltd.bltd_num); 805 - task = kthread_run(ldlm_bl_thread_main, &bltd, bltd.bltd_name); 805 + task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name); 806 806 if (IS_ERR(task)) { 807 807 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n", 808 808 atomic_read(&blp->blp_num_threads), PTR_ERR(task));
+1 -1
drivers/staging/lustre/lustre/libcfs/workitem.c
··· 397 397 sched->ws_name, sched->ws_nthreads); 398 398 } 399 399 400 - task = kthread_run(cfs_wi_scheduler, sched, name); 400 + task = kthread_run(cfs_wi_scheduler, sched, "%s", name); 401 401 if (!IS_ERR(task)) { 402 402 nthrs--; 403 403 continue;
+2 -2
drivers/staging/lustre/lustre/ptlrpc/pinger.c
··· 383 383 384 384 /* CLONE_VM and CLONE_FILES just avoid a needless copy, because we 385 385 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */ 386 - rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, 387 - &pinger_thread, pinger_thread.t_name)); 386 + rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread, 387 + "%s", pinger_thread.t_name)); 388 388 if (IS_ERR_VALUE(rc)) { 389 389 CERROR("cannot start thread: %d\n", rc); 390 390 return rc;
+4 -4
drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
··· 615 615 init_completion(&pc->pc_starting); 616 616 init_completion(&pc->pc_finishing); 617 617 spin_lock_init(&pc->pc_lock); 618 - strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1); 618 + strlcpy(pc->pc_name, name, sizeof(pc->pc_name)); 619 619 pc->pc_set = ptlrpc_prep_set(); 620 620 if (pc->pc_set == NULL) 621 621 GOTO(out, rc = -ENOMEM); ··· 638 638 GOTO(out, rc); 639 639 } 640 640 641 - task = kthread_run(ptlrpcd, pc, pc->pc_name); 641 + task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name); 642 642 if (IS_ERR(task)) 643 643 GOTO(out, rc = PTR_ERR(task)); 644 644 ··· 745 745 if (ptlrpcds == NULL) 746 746 GOTO(out, rc = -ENOMEM); 747 747 748 - snprintf(name, 15, "ptlrpcd_rcv"); 748 + snprintf(name, sizeof(name), "ptlrpcd_rcv"); 749 749 set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags); 750 750 rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv); 751 751 if (rc < 0) ··· 764 764 * unnecessary dependency. But how to distribute async RPCs load 765 765 * among all the ptlrpc daemons becomes another trouble. */ 766 766 for (i = 0; i < nthreads; i++) { 767 - snprintf(name, 15, "ptlrpcd_%d", i); 767 + snprintf(name, sizeof(name), "ptlrpcd_%d", i); 768 768 rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]); 769 769 if (rc < 0) 770 770 GOTO(out, rc);
+2 -2
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
··· 59 59 ****************************************/ 60 60 61 61 62 - #define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) 63 - #define PAGES_PER_POOL (PTRS_PER_PAGE) 62 + #define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) 63 + #define PAGES_PER_POOL (POINTERS_PER_PAGE) 64 64 65 65 #define IDLE_IDX_MAX (100) 66 66 #define IDLE_IDX_WEIGHT (3)
+3 -3
drivers/staging/lustre/lustre/ptlrpc/service.c
··· 2718 2718 spin_unlock(&svcpt->scp_lock); 2719 2719 2720 2720 if (svcpt->scp_cpt >= 0) { 2721 - snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d", 2721 + snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d", 2722 2722 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id); 2723 2723 } else { 2724 - snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s_%04d", 2724 + snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d", 2725 2725 svc->srv_thread_name, thread->t_id); 2726 2726 } 2727 2727 2728 2728 CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); 2729 - rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name)); 2729 + rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name)); 2730 2730 if (IS_ERR_VALUE(rc)) { 2731 2731 CERROR("cannot start thread '%s': rc %d\n", 2732 2732 thread->t_name, rc);
+1 -6
drivers/staging/octeon/ethernet-mem.c
··· 48 48 while (freed) { 49 49 50 50 struct sk_buff *skb = dev_alloc_skb(size + 256); 51 - if (unlikely(skb == NULL)) { 52 - pr_warning 53 - ("Failed to allocate skb for hardware pool %d\n", 54 - pool); 51 + if (unlikely(skb == NULL)) 55 52 break; 56 - } 57 - 58 53 skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f)); 59 54 *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; 60 55 cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
+1 -3
drivers/staging/octeon/ethernet-rgmii.c
··· 373 373 * Enable interrupts on inband status changes 374 374 * for this port. 375 375 */ 376 - gmx_rx_int_en.u64 = 377 - cvmx_read_csr(CVMX_GMXX_RXX_INT_EN 378 - (index, interface)); 376 + gmx_rx_int_en.u64 = 0; 379 377 gmx_rx_int_en.s.phy_dupx = 1; 380 378 gmx_rx_int_en.s.phy_link = 1; 381 379 gmx_rx_int_en.s.phy_spd = 1;
+1 -4
drivers/staging/octeon/ethernet-rx.c
··· 303 303 if (backlog > budget * cores_in_use && napi != NULL) 304 304 cvm_oct_enable_one_cpu(); 305 305 } 306 + rx_count++; 306 307 307 308 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; 308 309 if (likely(skb_in_hw)) { ··· 337 336 */ 338 337 skb = dev_alloc_skb(work->len); 339 338 if (!skb) { 340 - printk_ratelimited("Port %d failed to allocate " 341 - "skbuff, packet dropped\n", 342 - work->ipprt); 343 339 cvm_oct_free_work(work); 344 340 continue; 345 341 } ··· 427 429 #endif 428 430 } 429 431 netif_receive_skb(skb); 430 - rx_count++; 431 432 } else { 432 433 /* Drop any packet received for a device that isn't up */ 433 434 /*
+7 -7
drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
··· 1827 1827 1828 1828 #ifdef CONFIG_88EU_P2P 1829 1829 1830 - static int get_reg_classes_full_count(struct p2p_channels channel_list) 1830 + static int get_reg_classes_full_count(struct p2p_channels *channel_list) 1831 1831 { 1832 1832 int cnt = 0; 1833 1833 int i; 1834 1834 1835 - for (i = 0; i < channel_list.reg_classes; i++) { 1836 - cnt += channel_list.reg_class[i].channels; 1835 + for (i = 0; i < channel_list->reg_classes; i++) { 1836 + cnt += channel_list->reg_class[i].channels; 1837 1837 } 1838 1838 1839 1839 return cnt; ··· 2065 2065 /* + number of channels in all classes */ 2066 2066 len_channellist_attr = 3 2067 2067 + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes) 2068 - + get_reg_classes_full_count(pmlmeext->channel_list); 2068 + + get_reg_classes_full_count(&pmlmeext->channel_list); 2069 2069 2070 2070 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2071 2071 p2pielen += 2; ··· 2437 2437 /* + number of channels in all classes */ 2438 2438 len_channellist_attr = 3 2439 2439 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 2440 - + get_reg_classes_full_count(pmlmeext->channel_list); 2440 + + get_reg_classes_full_count(&pmlmeext->channel_list); 2441 2441 2442 2442 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2443 2443 ··· 2859 2859 /* + number of channels in all classes */ 2860 2860 len_channellist_attr = 3 2861 2861 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 2862 - + get_reg_classes_full_count(pmlmeext->channel_list); 2862 + + get_reg_classes_full_count(&pmlmeext->channel_list); 2863 2863 2864 2864 *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr); 2865 2865 ··· 3120 3120 /* + number of channels in all classes */ 3121 3121 len_channellist_attr = 3 3122 3122 + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes 3123 - + get_reg_classes_full_count(pmlmeext->channel_list); 3123 + + get_reg_classes_full_count(&pmlmeext->channel_list); 3124 3124 3125 3125 *(__le16 *)(p2pie + p2pielen) = 
cpu_to_le16(len_channellist_attr); 3126 3126 p2pielen += 2;
+1 -1
drivers/staging/rtl8188eu/core/rtw_wlan_util.c
··· 631 631 inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3; 632 632 633 633 if (pregpriv->wifi_spec == 1) { 634 - u32 j, tmp, change_inx; 634 + u32 j, tmp, change_inx = false; 635 635 636 636 /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */ 637 637 for (i = 0; i < 4; i++) {
+1 -1
drivers/staging/rtl8188eu/include/odm.h
··· 1008 1008 #define DM_false_ALARM_THRESH_LOW 400 1009 1009 #define DM_false_ALARM_THRESH_HIGH 1000 1010 1010 1011 - #define DM_DIG_MAX_NIC 0x3e 1011 + #define DM_DIG_MAX_NIC 0x4e 1012 1012 #define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */ 1013 1013 1014 1014 #define DM_DIG_MAX_AP 0x32
+2 -2
drivers/staging/vt6656/card.c
··· 172 172 if (!CARDbIsOFDMinBasicRate(pDevice)) { 173 173 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO 174 174 "swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx); 175 - if (wRateIdx > RATE_24M) 176 - wRateIdx = RATE_24M; 175 + if (wRateIdx > RATE_24M) 176 + wRateIdx = RATE_24M; 177 177 return wRateIdx; 178 178 } 179 179
+1 -1
drivers/staging/xillybus/xillybus_core.c
··· 2054 2054 NULL, 2055 2055 MKDEV(major, i), 2056 2056 NULL, 2057 - devname); 2057 + "%s", devname); 2058 2058 2059 2059 if (IS_ERR(device)) { 2060 2060 pr_warn("xillybus: Failed to create %s "
-1
drivers/staging/zram/zram_drv.c
··· 981 981 MODULE_LICENSE("Dual BSD/GPL"); 982 982 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); 983 983 MODULE_DESCRIPTION("Compressed RAM Block Device"); 984 - MODULE_ALIAS("devname:zram");
+2 -1
drivers/tty/tty_io.c
··· 854 854 struct pid *tty_pgrp = tty_get_pgrp(tty); 855 855 if (tty_pgrp) { 856 856 kill_pgrp(tty_pgrp, SIGHUP, on_exit); 857 - kill_pgrp(tty_pgrp, SIGCONT, on_exit); 857 + if (!on_exit) 858 + kill_pgrp(tty_pgrp, SIGCONT, on_exit); 858 859 put_pid(tty_pgrp); 859 860 } 860 861 }
-1
drivers/usb/dwc3/Kconfig
··· 1 1 config USB_DWC3 2 2 tristate "DesignWare USB3 DRD Core Support" 3 3 depends on (USB || USB_GADGET) && HAS_DMA 4 - depends on EXTCON 5 4 select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD 6 5 help 7 6 Say Y or M here if your system has a Dual Role SuperSpeed
+2
drivers/usb/dwc3/dwc3-pci.c
··· 28 28 /* FIXME define these in <linux/pci_ids.h> */ 29 29 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 30 30 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd 31 + #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 31 32 32 33 struct dwc3_pci { 33 34 struct device *dev; ··· 188 187 PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 189 188 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), 190 189 }, 190 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, 191 191 { } /* Terminating Entry */ 192 192 }; 193 193 MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+2 -4
drivers/usb/dwc3/gadget.c
··· 2611 2611 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 2612 2612 if (ret) { 2613 2613 dev_err(dwc->dev, "failed to register udc\n"); 2614 - goto err5; 2614 + goto err4; 2615 2615 } 2616 2616 2617 2617 return 0; 2618 2618 2619 - err5: 2620 - dwc3_gadget_free_endpoints(dwc); 2621 - 2622 2619 err4: 2620 + dwc3_gadget_free_endpoints(dwc); 2623 2621 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE, 2624 2622 dwc->ep0_bounce, dwc->ep0_bounce_addr); 2625 2623
+1 -18
drivers/usb/gadget/cdc2.c
··· 113 113 c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; 114 114 } 115 115 116 - fi_ecm = usb_get_function_instance("ecm"); 117 - if (IS_ERR(fi_ecm)) { 118 - status = PTR_ERR(fi_ecm); 119 - goto err_func_ecm; 120 - } 121 - 122 116 f_ecm = usb_get_function(fi_ecm); 123 117 if (IS_ERR(f_ecm)) { 124 118 status = PTR_ERR(f_ecm); ··· 123 129 if (status) 124 130 goto err_add_ecm; 125 131 126 - fi_serial = usb_get_function_instance("acm"); 127 - if (IS_ERR(fi_serial)) { 128 - status = PTR_ERR(fi_serial); 129 - goto err_get_acm; 130 - } 131 - 132 132 f_acm = usb_get_function(fi_serial); 133 133 if (IS_ERR(f_acm)) { 134 134 status = PTR_ERR(f_acm); 135 - goto err_func_acm; 135 + goto err_get_acm; 136 136 } 137 137 138 138 status = usb_add_function(c, f_acm); 139 139 if (status) 140 140 goto err_add_acm; 141 - 142 141 return 0; 143 142 144 143 err_add_acm: 145 144 usb_put_function(f_acm); 146 - err_func_acm: 147 - usb_put_function_instance(fi_serial); 148 145 err_get_acm: 149 146 usb_remove_function(c, f_ecm); 150 147 err_add_ecm: 151 148 usb_put_function(f_ecm); 152 149 err_get_ecm: 153 - usb_put_function_instance(fi_ecm); 154 - err_func_ecm: 155 150 return status; 156 151 } 157 152
+4 -3
drivers/usb/gadget/dummy_hcd.c
··· 923 923 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); 924 924 struct dummy *dum = dum_hcd->dum; 925 925 926 - dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n", 927 - driver->driver.name); 926 + if (driver) 927 + dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n", 928 + driver->driver.name); 928 929 929 930 dum->driver = NULL; 930 931 ··· 1001 1000 { 1002 1001 struct dummy *dum = platform_get_drvdata(pdev); 1003 1002 1004 - usb_del_gadget_udc(&dum->gadget); 1005 1003 device_remove_file(&dum->gadget.dev, &dev_attr_function); 1004 + usb_del_gadget_udc(&dum->gadget); 1006 1005 return 0; 1007 1006 } 1008 1007
+1 -1
drivers/usb/gadget/f_ecm.c
··· 995 995 usb_ep_free_request(ecm->notify, ecm->notify_req); 996 996 } 997 997 998 - struct usb_function *ecm_alloc(struct usb_function_instance *fi) 998 + static struct usb_function *ecm_alloc(struct usb_function_instance *fi) 999 999 { 1000 1000 struct f_ecm *ecm; 1001 1001 struct f_ecm_opts *opts;
+1 -1
drivers/usb/gadget/f_eem.c
··· 624 624 usb_free_all_descriptors(f); 625 625 } 626 626 627 - struct usb_function *eem_alloc(struct usb_function_instance *fi) 627 + static struct usb_function *eem_alloc(struct usb_function_instance *fi) 628 628 { 629 629 struct f_eem *eem; 630 630 struct f_eem_opts *opts;
+2
drivers/usb/gadget/f_mass_storage.c
··· 2260 2260 /* Disable the endpoints */ 2261 2261 if (fsg->bulk_in_enabled) { 2262 2262 usb_ep_disable(fsg->bulk_in); 2263 + fsg->bulk_in->driver_data = NULL; 2263 2264 fsg->bulk_in_enabled = 0; 2264 2265 } 2265 2266 if (fsg->bulk_out_enabled) { 2266 2267 usb_ep_disable(fsg->bulk_out); 2268 + fsg->bulk_out->driver_data = NULL; 2267 2269 fsg->bulk_out_enabled = 0; 2268 2270 } 2269 2271
+1 -1
drivers/usb/gadget/fotg210-udc.c
··· 1214 1214 1215 1215 module_platform_driver(fotg210_driver); 1216 1216 1217 - MODULE_AUTHOR("Yuan-Hsin Chen <yhchen@faraday-tech.com>"); 1217 + MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>"); 1218 1218 MODULE_LICENSE("GPL"); 1219 1219 MODULE_DESCRIPTION(DRIVER_DESC);
+1 -1
drivers/usb/gadget/fusb300_udc.c
··· 22 22 23 23 MODULE_DESCRIPTION("FUSB300 USB gadget driver"); 24 24 MODULE_LICENSE("GPL"); 25 - MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>"); 25 + MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>"); 26 26 MODULE_ALIAS("platform:fusb300_udc"); 27 27 28 28 #define DRIVER_VERSION "20 October 2010"
+4 -4
drivers/usb/gadget/multi.c
··· 179 179 return ret; 180 180 } 181 181 182 - static int rndis_config_register(struct usb_composite_dev *cdev) 182 + static __ref int rndis_config_register(struct usb_composite_dev *cdev) 183 183 { 184 184 static struct usb_configuration config = { 185 185 .bConfigurationValue = MULTI_RNDIS_CONFIG_NUM, ··· 194 194 195 195 #else 196 196 197 - static int rndis_config_register(struct usb_composite_dev *cdev) 197 + static __ref int rndis_config_register(struct usb_composite_dev *cdev) 198 198 { 199 199 return 0; 200 200 } ··· 241 241 return ret; 242 242 } 243 243 244 - static int cdc_config_register(struct usb_composite_dev *cdev) 244 + static __ref int cdc_config_register(struct usb_composite_dev *cdev) 245 245 { 246 246 static struct usb_configuration config = { 247 247 .bConfigurationValue = MULTI_CDC_CONFIG_NUM, ··· 256 256 257 257 #else 258 258 259 - static int cdc_config_register(struct usb_composite_dev *cdev) 259 + static __ref int cdc_config_register(struct usb_composite_dev *cdev) 260 260 { 261 261 return 0; 262 262 }
+3
drivers/usb/gadget/mv_u3d_core.c
··· 645 645 struct mv_u3d_ep *ep; 646 646 struct mv_u3d_ep_context *ep_context; 647 647 u32 epxcr, direction; 648 + unsigned long flags; 648 649 649 650 if (!_ep) 650 651 return -EINVAL; ··· 662 661 direction = mv_u3d_ep_dir(ep); 663 662 664 663 /* nuke all pending requests (does flush) */ 664 + spin_lock_irqsave(&u3d->lock, flags); 665 665 mv_u3d_nuke(ep, -ESHUTDOWN); 666 + spin_unlock_irqrestore(&u3d->lock, flags); 666 667 667 668 /* Disable the endpoint for Rx or Tx and reset the endpoint type */ 668 669 if (direction == MV_U3D_EP_DIR_OUT) {
+4 -9
drivers/usb/gadget/s3c-hsotg.c
··· 2475 2475 if (gintsts & GINTSTS_ErlySusp) { 2476 2476 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n"); 2477 2477 writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS); 2478 - 2479 - s3c_hsotg_disconnect(hsotg); 2480 2478 } 2481 2479 2482 2480 /* ··· 2960 2962 if (!hsotg) 2961 2963 return -ENODEV; 2962 2964 2963 - if (!driver || driver != hsotg->driver || !driver->unbind) 2964 - return -EINVAL; 2965 - 2966 2965 /* all endpoints should be shutdown */ 2967 2966 for (ep = 0; ep < hsotg->num_of_eps; ep++) 2968 2967 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); ··· 2967 2972 spin_lock_irqsave(&hsotg->lock, flags); 2968 2973 2969 2974 s3c_hsotg_phy_disable(hsotg); 2970 - regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies); 2971 2975 2972 - hsotg->driver = NULL; 2976 + if (!driver) 2977 + hsotg->driver = NULL; 2978 + 2973 2979 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 2974 2980 2975 2981 spin_unlock_irqrestore(&hsotg->lock, flags); 2976 2982 2977 - dev_info(hsotg->dev, "unregistered gadget driver '%s'\n", 2978 - driver->driver.name); 2983 + regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies); 2979 2984 2980 2985 return 0; 2981 2986 }
+1 -1
drivers/usb/host/ehci-fsl.c
··· 669 669 * generic hardware linkage 670 670 */ 671 671 .irq = ehci_irq, 672 - .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, 672 + .flags = HCD_USB2 | HCD_MEMORY, 673 673 674 674 /* 675 675 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-grlib.c
··· 43 43 * generic hardware linkage 44 44 */ 45 45 .irq = ehci_irq, 46 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 46 + .flags = HCD_MEMORY | HCD_USB2, 47 47 48 48 /* 49 49 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-hcd.c
··· 1158 1158 * generic hardware linkage 1159 1159 */ 1160 1160 .irq = ehci_irq, 1161 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 1161 + .flags = HCD_MEMORY | HCD_USB2, 1162 1162 1163 1163 /* 1164 1164 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-mv.c
··· 96 96 * generic hardware linkage 97 97 */ 98 98 .irq = ehci_irq, 99 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 99 + .flags = HCD_MEMORY | HCD_USB2, 100 100 101 101 /* 102 102 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-octeon.c
··· 51 51 * generic hardware linkage 52 52 */ 53 53 .irq = ehci_irq, 54 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 54 + .flags = HCD_MEMORY | HCD_USB2, 55 55 56 56 /* 57 57 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-pmcmsp.c
··· 286 286 #else 287 287 .irq = ehci_irq, 288 288 #endif 289 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 289 + .flags = HCD_MEMORY | HCD_USB2, 290 290 291 291 /* 292 292 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-ppc-of.c
··· 28 28 * generic hardware linkage 29 29 */ 30 30 .irq = ehci_irq, 31 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 31 + .flags = HCD_MEMORY | HCD_USB2, 32 32 33 33 /* 34 34 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-ps3.c
··· 71 71 .product_desc = "PS3 EHCI Host Controller", 72 72 .hcd_priv_size = sizeof(struct ehci_hcd), 73 73 .irq = ehci_irq, 74 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 74 + .flags = HCD_MEMORY | HCD_USB2, 75 75 .reset = ps3_ehci_hc_reset, 76 76 .start = ehci_run, 77 77 .stop = ehci_stop,
+5
drivers/usb/host/ehci-q.c
··· 247 247 248 248 static void 249 249 ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) 250 + __releases(ehci->lock) 251 + __acquires(ehci->lock) 250 252 { 251 253 if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { 252 254 /* ... update hc-wide periodic stats */ ··· 274 272 urb->actual_length, urb->transfer_buffer_length); 275 273 #endif 276 274 275 + /* complete() can reenter this HCD */ 277 276 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); 277 + spin_unlock (&ehci->lock); 278 278 usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); 279 + spin_lock (&ehci->lock); 279 280 } 280 281 281 282 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
+1 -1
drivers/usb/host/ehci-sead3.c
··· 55 55 * generic hardware linkage 56 56 */ 57 57 .irq = ehci_irq, 58 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 58 + .flags = HCD_MEMORY | HCD_USB2, 59 59 60 60 /* 61 61 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-sh.c
··· 36 36 * generic hardware linkage 37 37 */ 38 38 .irq = ehci_irq, 39 - .flags = HCD_USB2 | HCD_MEMORY | HCD_BH, 39 + .flags = HCD_USB2 | HCD_MEMORY, 40 40 41 41 /* 42 42 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-tilegx.c
··· 61 61 * Generic hardware linkage. 62 62 */ 63 63 .irq = ehci_irq, 64 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 64 + .flags = HCD_MEMORY | HCD_USB2, 65 65 66 66 /* 67 67 * Basic lifecycle operations.
+1 -1
drivers/usb/host/ehci-w90x900.c
··· 108 108 * generic hardware linkage 109 109 */ 110 110 .irq = ehci_irq, 111 - .flags = HCD_USB2|HCD_MEMORY|HCD_BH, 111 + .flags = HCD_USB2|HCD_MEMORY, 112 112 113 113 /* 114 114 * basic lifecycle operations
+1 -1
drivers/usb/host/ehci-xilinx-of.c
··· 79 79 * generic hardware linkage 80 80 */ 81 81 .irq = ehci_irq, 82 - .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, 82 + .flags = HCD_MEMORY | HCD_USB2, 83 83 84 84 /* 85 85 * basic lifecycle operations
+3 -3
drivers/usb/host/fsl-mph-dr-of.c
··· 24 24 enum fsl_usb2_operating_modes op_mode; /* operating mode */ 25 25 }; 26 26 27 - struct fsl_usb2_dev_data dr_mode_data[] = { 27 + static struct fsl_usb2_dev_data dr_mode_data[] = { 28 28 { 29 29 .dr_mode = "host", 30 30 .drivers = { "fsl-ehci", NULL, NULL, }, ··· 42 42 }, 43 43 }; 44 44 45 - struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np) 45 + static struct fsl_usb2_dev_data *get_dr_mode_data(struct device_node *np) 46 46 { 47 47 const unsigned char *prop; 48 48 int i; ··· 75 75 return FSL_USB2_PHY_NONE; 76 76 } 77 77 78 - struct platform_device *fsl_usb2_device_register( 78 + static struct platform_device *fsl_usb2_device_register( 79 79 struct platform_device *ofdev, 80 80 struct fsl_usb2_platform_data *pdata, 81 81 const char *name, int id)
+1 -1
drivers/usb/phy/phy-omap-usb3.c
··· 79 79 return &dpll_map[i].params; 80 80 } 81 81 82 - return 0; 82 + return NULL; 83 83 } 84 84 85 85 static int omap_usb3_suspend(struct usb_phy *x, int suspend)
+1 -1
drivers/usb/serial/Kconfig
··· 60 60 - Suunto ANT+ USB device. 61 61 - Fundamental Software dongle. 62 62 - HP4x calculators 63 - - a number of Motoroloa phones 63 + - a number of Motorola phones 64 64 - Siemens USB/MPI adapter. 65 65 - ViVOtech ViVOpay USB device. 66 66 - Infineon Modem Flashloader USB interface
+32 -11
drivers/usb/serial/pl2303.c
··· 139 139 HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */ 140 140 HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */ 141 141 TB, /* TB version */ 142 + HX_CLONE, /* Cheap and less functional clone of the HX chip */ 142 143 }; 143 144 /* 144 145 * NOTE: don't know the difference between type 0 and type 1, ··· 207 206 * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB 208 207 */ 209 208 if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) { 210 - type = HX_TA; 211 - type_str = "X/HX/TA"; 209 + /* Check if the device is a clone */ 210 + pl2303_vendor_read(0x9494, 0, serial, buf); 211 + /* 212 + * NOTE: Not sure if this read is really needed. 213 + * The HX returns 0x00, the clone 0x02, but the Windows 214 + * driver seems to ignore the value and continues. 215 + */ 216 + pl2303_vendor_write(0x0606, 0xaa, serial); 217 + pl2303_vendor_read(0x8686, 0, serial, buf); 218 + if (buf[0] != 0xaa) { 219 + type = HX_CLONE; 220 + type_str = "X/HX clone (limited functionality)"; 221 + } else { 222 + type = HX_TA; 223 + type_str = "X/HX/TA"; 224 + } 225 + pl2303_vendor_write(0x0606, 0x00, serial); 212 226 } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice) 213 227 == 0x400) { 214 228 type = HXD_EA_RA_SA; ··· 321 305 { 322 306 /* 323 307 * NOTE: Only the values defined in baud_sup are supported ! 324 - * => if unsupported values are set, the PL2303 seems to 325 - * use 9600 baud (at least my PL2303X always does) 308 + * => if unsupported values are set, the PL2303 uses 9600 baud instead 309 + * => HX clones just don't work at unsupported baud rates < 115200 baud, 310 + * for baud rates > 115200 they run at 115200 baud 326 311 */ 327 312 const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600, 328 313 4800, 7200, 9600, 14400, 19200, 28800, 38400, ··· 333 316 * NOTE: With the exception of type_0/1 devices, the following 334 317 * additional baud rates are supported (tested with HX rev. 
3A only): 335 318 * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800, 336 - * 403200, 806400. (*: not HX) 319 + * 403200, 806400. (*: not HX and HX clones) 337 320 * 338 321 * Maximum values: HXD, TB: 12000000; HX, TA: 6000000; 339 - * type_0+1: 1228800; RA: 921600; SA: 115200 322 + * type_0+1: 1228800; RA: 921600; HX clones, SA: 115200 340 323 * 341 324 * As long as we are not using this encoding method for anything else 342 - * than the type_0+1 and HX chips, there is no point in complicating 343 - * the code to support them. 325 + * than the type_0+1, HX and HX clone chips, there is no point in 326 + * complicating the code to support them. 344 327 */ 345 328 int i; 346 329 ··· 364 347 baud = min_t(int, baud, 6000000); 365 348 else if (type == type_0 || type == type_1) 366 349 baud = min_t(int, baud, 1228800); 350 + else if (type == HX_CLONE) 351 + baud = min_t(int, baud, 115200); 367 352 /* Direct (standard) baud rate encoding method */ 368 353 put_unaligned_le32(baud, buf); 369 354 ··· 378 359 /* 379 360 * Divisor based baud rate encoding method 380 361 * 381 - * NOTE: it's not clear if the type_0/1 chips support this method 362 + * NOTE: HX clones do NOT support this method. 363 + * It's not clear if the type_0/1 chips support it. 382 364 * 383 365 * divisor = 12MHz * 32 / baudrate = 2^A * B 384 366 * ··· 472 452 * 1) Direct method: encodes the baud rate value directly 473 453 * => supported by all chip types 474 454 * 2) Divisor based method: encodes a divisor to a base value (12MHz*32) 475 - * => supported by HX chips (and likely not by type_0/1 chips) 455 + * => not supported by HX clones (and likely type_0/1 chips) 476 456 * 477 457 * NOTE: Although the divisor based baud rate encoding method is much 478 458 * more flexible, some of the standard baud rate values can not be ··· 480 460 * the device likely uses the same baud rate generator for both methods 481 461 * so that there is likley no difference. 
482 462 */ 483 - if (type == type_0 || type == type_1) 463 + if (type == type_0 || type == type_1 || type == HX_CLONE) 484 464 baud = pl2303_baudrate_encode_direct(baud, type, buf); 485 465 else 486 466 baud = pl2303_baudrate_encode_divisor(baud, type, buf); ··· 833 813 result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 834 814 BREAK_REQUEST, BREAK_REQUEST_TYPE, state, 835 815 0, NULL, 0, 100); 816 + /* NOTE: HX clones don't support sending breaks, -EPIPE is returned */ 836 817 if (result) 837 818 dev_err(&port->dev, "error sending break = %d\n", result); 838 819 }
+28 -15
drivers/vhost/scsi.c
··· 461 461 u32 i; 462 462 for (i = 0; i < tv_cmd->tvc_sgl_count; i++) 463 463 put_page(sg_page(&tv_cmd->tvc_sgl[i])); 464 - } 464 + } 465 465 466 466 tcm_vhost_put_inflight(tv_cmd->inflight); 467 467 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); ··· 1373 1373 return 0; 1374 1374 } 1375 1375 1376 + static void vhost_scsi_free(struct vhost_scsi *vs) 1377 + { 1378 + if (is_vmalloc_addr(vs)) 1379 + vfree(vs); 1380 + else 1381 + kfree(vs); 1382 + } 1383 + 1376 1384 static int vhost_scsi_open(struct inode *inode, struct file *f) 1377 1385 { 1378 1386 struct vhost_scsi *vs; 1379 1387 struct vhost_virtqueue **vqs; 1380 - int r, i; 1388 + int r = -ENOMEM, i; 1381 1389 1382 - vs = kzalloc(sizeof(*vs), GFP_KERNEL); 1383 - if (!vs) 1384 - return -ENOMEM; 1390 + vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 1391 + if (!vs) { 1392 + vs = vzalloc(sizeof(*vs)); 1393 + if (!vs) 1394 + goto err_vs; 1395 + } 1385 1396 1386 1397 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); 1387 - if (!vqs) { 1388 - kfree(vs); 1389 - return -ENOMEM; 1390 - } 1398 + if (!vqs) 1399 + goto err_vqs; 1391 1400 1392 1401 vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); 1393 1402 vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); ··· 1416 1407 1417 1408 tcm_vhost_init_inflight(vs, NULL); 1418 1409 1419 - if (r < 0) { 1420 - kfree(vqs); 1421 - kfree(vs); 1422 - return r; 1423 - } 1410 + if (r < 0) 1411 + goto err_init; 1424 1412 1425 1413 f->private_data = vs; 1426 1414 return 0; 1415 + 1416 + err_init: 1417 + kfree(vqs); 1418 + err_vqs: 1419 + vhost_scsi_free(vs); 1420 + err_vs: 1421 + return r; 1427 1422 } 1428 1423 1429 1424 static int vhost_scsi_release(struct inode *inode, struct file *f) ··· 1444 1431 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ 1445 1432 vhost_scsi_flush(vs); 1446 1433 kfree(vs->dev.vqs); 1447 - kfree(vs); 1434 + vhost_scsi_free(vs); 1448 1435 return 0; 1449 1436 } 1450 1437
+3 -1
drivers/vhost/vhost.c
··· 161 161 if (list_empty(&work->node)) { 162 162 list_add_tail(&work->node, &dev->work_list); 163 163 work->queue_seq++; 164 + spin_unlock_irqrestore(&dev->work_lock, flags); 164 165 wake_up_process(dev->worker); 166 + } else { 167 + spin_unlock_irqrestore(&dev->work_lock, flags); 165 168 } 166 - spin_unlock_irqrestore(&dev->work_lock, flags); 167 169 } 168 170 EXPORT_SYMBOL_GPL(vhost_work_queue); 169 171
+4 -3
fs/9p/v9fs.c
··· 603 603 if (ret < 0) 604 604 return ret; 605 605 #ifdef CONFIG_9P_FSCACHE 606 - return fscache_register_netfs(&v9fs_cache_netfs); 607 - #else 608 - return ret; 606 + ret = fscache_register_netfs(&v9fs_cache_netfs); 607 + if (ret < 0) 608 + v9fs_destroy_inode_cache(); 609 609 #endif 610 + return ret; 610 611 } 611 612 612 613 static void v9fs_cache_unregister(void)
+1 -7
fs/9p/vfs_inode_dotl.c
··· 267 267 } 268 268 269 269 /* Only creates */ 270 - if (!(flags & O_CREAT)) 270 + if (!(flags & O_CREAT) || dentry->d_inode) 271 271 return finish_no_open(file, res); 272 - else if (dentry->d_inode) { 273 - if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) 274 - return -EEXIST; 275 - else 276 - return finish_no_open(file, res); 277 - } 278 272 279 273 v9ses = v9fs_inode2v9ses(dir); 280 274
+3 -10
fs/autofs4/waitq.c
··· 109 109 110 110 pkt.hdr.proto_version = sbi->version; 111 111 pkt.hdr.type = type; 112 - mutex_lock(&sbi->wq_mutex); 113 112 114 - /* Check if we have become catatonic */ 115 - if (sbi->catatonic) { 116 - mutex_unlock(&sbi->wq_mutex); 117 - return; 118 - } 119 113 switch (type) { 120 114 /* Kernel protocol v4 missing and expire packets */ 121 115 case autofs_ptype_missing: ··· 421 427 wq->tgid = current->tgid; 422 428 wq->status = -EINTR; /* Status return if interrupted */ 423 429 wq->wait_ctr = 2; 424 - mutex_unlock(&sbi->wq_mutex); 425 430 426 431 if (sbi->version < 5) { 427 432 if (notify == NFY_MOUNT) ··· 442 449 (unsigned long) wq->wait_queue_token, wq->name.len, 443 450 wq->name.name, notify); 444 451 445 - /* autofs4_notify_daemon() may block */ 452 + /* autofs4_notify_daemon() may block; it will unlock ->wq_mutex */ 446 453 autofs4_notify_daemon(sbi, wq, type); 447 454 } else { 448 455 wq->wait_ctr++; 449 - mutex_unlock(&sbi->wq_mutex); 450 - kfree(qstr.name); 451 456 DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", 452 457 (unsigned long) wq->wait_queue_token, wq->name.len, 453 458 wq->name.name, notify); 459 + mutex_unlock(&sbi->wq_mutex); 460 + kfree(qstr.name); 454 461 } 455 462 456 463 /*
+1 -1
fs/bio-integrity.c
··· 735 735 mempool_destroy(bs->bio_integrity_pool); 736 736 737 737 if (bs->bvec_integrity_pool) 738 - mempool_destroy(bs->bio_integrity_pool); 738 + mempool_destroy(bs->bvec_integrity_pool); 739 739 } 740 740 EXPORT_SYMBOL(bioset_integrity_free); 741 741
+4 -1
fs/btrfs/btrfs_inode.h
··· 213 213 static inline int btrfs_inode_in_log(struct inode *inode, u64 generation) 214 214 { 215 215 if (BTRFS_I(inode)->logged_trans == generation && 216 - BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit) 216 + BTRFS_I(inode)->last_sub_trans <= 217 + BTRFS_I(inode)->last_log_commit && 218 + BTRFS_I(inode)->last_sub_trans <= 219 + BTRFS_I(inode)->root->last_log_commit) 217 220 return 1; 218 221 return 0; 219 222 }
+5 -2
fs/btrfs/ctree.c
··· 1005 1005 return ret; 1006 1006 } 1007 1007 1008 - if (root->ref_cows) 1009 - btrfs_reloc_cow_block(trans, root, buf, cow); 1008 + if (root->ref_cows) { 1009 + ret = btrfs_reloc_cow_block(trans, root, buf, cow); 1010 + if (ret) 1011 + return ret; 1012 + } 1010 1013 1011 1014 if (buf == root->node) { 1012 1015 WARN_ON(parent && parent != buf);
+4 -13
fs/btrfs/ctree.h
··· 1118 1118 */ 1119 1119 struct percpu_counter total_bytes_pinned; 1120 1120 1121 - /* 1122 - * we bump reservation progress every time we decrement 1123 - * bytes_reserved. This way people waiting for reservations 1124 - * know something good has happened and they can check 1125 - * for progress. The number here isn't to be trusted, it 1126 - * just shows reclaim activity 1127 - */ 1128 - unsigned long reservation_progress; 1129 - 1130 1121 unsigned int full:1; /* indicates that we cannot allocate any more 1131 1122 chunks for this space */ 1132 1123 unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ ··· 3126 3135 unsigned num_items) 3127 3136 { 3128 3137 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * 3129 - 3 * num_items; 3138 + 2 * num_items; 3130 3139 } 3131 3140 3132 3141 /* ··· 3930 3939 struct btrfs_root *root); 3931 3940 int btrfs_recover_relocation(struct btrfs_root *root); 3932 3941 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); 3933 - void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 3934 - struct btrfs_root *root, struct extent_buffer *buf, 3935 - struct extent_buffer *cow); 3942 + int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 3943 + struct btrfs_root *root, struct extent_buffer *buf, 3944 + struct extent_buffer *cow); 3936 3945 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, 3937 3946 struct btrfs_pending_snapshot *pending, 3938 3947 u64 *bytes_to_reserve);
+2 -2
fs/btrfs/dev-replace.c
··· 400 400 args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; 401 401 btrfs_dev_replace_unlock(dev_replace); 402 402 403 - btrfs_wait_all_ordered_extents(root->fs_info, 0); 403 + btrfs_wait_all_ordered_extents(root->fs_info); 404 404 405 405 /* force writing the updated state information to disk */ 406 406 trans = btrfs_start_transaction(root, 0); ··· 475 475 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); 476 476 return ret; 477 477 } 478 - btrfs_wait_all_ordered_extents(root->fs_info, 0); 478 + btrfs_wait_all_ordered_extents(root->fs_info); 479 479 480 480 trans = btrfs_start_transaction(root, 0); 481 481 if (IS_ERR(trans)) {
+2
fs/btrfs/disk-io.c
··· 157 157 { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, 158 158 { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, 159 159 { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, 160 + { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" }, 160 161 { .id = 0, .name_stem = "tree" }, 161 162 }; 162 163 ··· 3416 3415 if (total_errors > max_errors) { 3417 3416 printk(KERN_ERR "btrfs: %d errors while writing supers\n", 3418 3417 total_errors); 3418 + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 3419 3419 3420 3420 /* FUA is masked off if unsupported and can't be the reason */ 3421 3421 btrfs_error(root->fs_info, -EIO,
+30 -27
fs/btrfs/extent-tree.c
··· 3925 3925 u64 space_size; 3926 3926 u64 avail; 3927 3927 u64 used; 3928 - u64 to_add; 3929 3928 3930 3929 used = space_info->bytes_used + space_info->bytes_reserved + 3931 3930 space_info->bytes_pinned + space_info->bytes_readonly; ··· 3958 3959 BTRFS_BLOCK_GROUP_RAID10)) 3959 3960 avail >>= 1; 3960 3961 3961 - to_add = space_info->total_bytes; 3962 - 3963 3962 /* 3964 3963 * If we aren't flushing all things, let us overcommit up to 3965 3964 * 1/2th of the space. If we can flush, don't let us overcommit 3966 3965 * too much, let it overcommit up to 1/8 of the space. 3967 3966 */ 3968 3967 if (flush == BTRFS_RESERVE_FLUSH_ALL) 3969 - to_add >>= 3; 3968 + avail >>= 3; 3970 3969 else 3971 - to_add >>= 1; 3970 + avail >>= 1; 3972 3971 3973 - /* 3974 - * Limit the overcommit to the amount of free space we could possibly 3975 - * allocate for chunks. 3976 - */ 3977 - to_add = min(avail, to_add); 3978 - 3979 - if (used + bytes < space_info->total_bytes + to_add) 3972 + if (used + bytes < space_info->total_bytes + avail) 3980 3973 return 1; 3981 3974 return 0; 3982 3975 } ··· 3991 4000 */ 3992 4001 btrfs_start_all_delalloc_inodes(root->fs_info, 0); 3993 4002 if (!current->journal_info) 3994 - btrfs_wait_all_ordered_extents(root->fs_info, 0); 4003 + btrfs_wait_all_ordered_extents(root->fs_info); 3995 4004 } 3996 4005 } 3997 4006 ··· 4021 4030 if (delalloc_bytes == 0) { 4022 4031 if (trans) 4023 4032 return; 4024 - btrfs_wait_all_ordered_extents(root->fs_info, 0); 4033 + btrfs_wait_all_ordered_extents(root->fs_info); 4025 4034 return; 4026 4035 } 4027 4036 ··· 4049 4058 4050 4059 loops++; 4051 4060 if (wait_ordered && !trans) { 4052 - btrfs_wait_all_ordered_extents(root->fs_info, 0); 4061 + btrfs_wait_all_ordered_extents(root->fs_info); 4053 4062 } else { 4054 4063 time_left = schedule_timeout_killable(1); 4055 4064 if (time_left) ··· 4456 4465 space_info->bytes_may_use -= num_bytes; 4457 4466 trace_btrfs_space_reservation(fs_info, "space_info", 4458 4467 
space_info->flags, num_bytes, 0); 4459 - space_info->reservation_progress++; 4460 4468 spin_unlock(&space_info->lock); 4461 4469 } 4462 4470 } ··· 4656 4666 sinfo->bytes_may_use -= num_bytes; 4657 4667 trace_btrfs_space_reservation(fs_info, "space_info", 4658 4668 sinfo->flags, num_bytes, 0); 4659 - sinfo->reservation_progress++; 4660 4669 block_rsv->reserved = block_rsv->size; 4661 4670 block_rsv->full = 1; 4662 4671 } ··· 5435 5446 space_info->bytes_readonly += num_bytes; 5436 5447 cache->reserved -= num_bytes; 5437 5448 space_info->bytes_reserved -= num_bytes; 5438 - space_info->reservation_progress++; 5439 5449 } 5440 5450 spin_unlock(&cache->lock); 5441 5451 spin_unlock(&space_info->lock); ··· 6105 6117 /* 6106 6118 * walks the btree of allocated extents and find a hole of a given size. 6107 6119 * The key ins is changed to record the hole: 6108 - * ins->objectid == block start 6120 + * ins->objectid == start position 6109 6121 * ins->flags = BTRFS_EXTENT_ITEM_KEY 6110 - * ins->offset == number of blocks 6122 + * ins->offset == the size of the hole. 6111 6123 * Any available blocks before search_start are skipped. 6124 + * 6125 + * If there is no suitable free space, we will record the max size of 6126 + * the free space extent currently. 
6112 6127 */ 6113 6128 static noinline int find_free_extent(struct btrfs_root *orig_root, 6114 6129 u64 num_bytes, u64 empty_size, ··· 6124 6133 struct btrfs_block_group_cache *block_group = NULL; 6125 6134 struct btrfs_block_group_cache *used_block_group; 6126 6135 u64 search_start = 0; 6136 + u64 max_extent_size = 0; 6127 6137 int empty_cluster = 2 * 1024 * 1024; 6128 6138 struct btrfs_space_info *space_info; 6129 6139 int loop = 0; ··· 6284 6292 btrfs_get_block_group(used_block_group); 6285 6293 6286 6294 offset = btrfs_alloc_from_cluster(used_block_group, 6287 - last_ptr, num_bytes, used_block_group->key.objectid); 6295 + last_ptr, 6296 + num_bytes, 6297 + used_block_group->key.objectid, 6298 + &max_extent_size); 6288 6299 if (offset) { 6289 6300 /* we have a block, we're done */ 6290 6301 spin_unlock(&last_ptr->refill_lock); ··· 6350 6355 * cluster 6351 6356 */ 6352 6357 offset = btrfs_alloc_from_cluster(block_group, 6353 - last_ptr, num_bytes, 6354 - search_start); 6358 + last_ptr, 6359 + num_bytes, 6360 + search_start, 6361 + &max_extent_size); 6355 6362 if (offset) { 6356 6363 /* we found one, proceed */ 6357 6364 spin_unlock(&last_ptr->refill_lock); ··· 6388 6391 if (cached && 6389 6392 block_group->free_space_ctl->free_space < 6390 6393 num_bytes + empty_cluster + empty_size) { 6394 + if (block_group->free_space_ctl->free_space > 6395 + max_extent_size) 6396 + max_extent_size = 6397 + block_group->free_space_ctl->free_space; 6391 6398 spin_unlock(&block_group->free_space_ctl->tree_lock); 6392 6399 goto loop; 6393 6400 } 6394 6401 spin_unlock(&block_group->free_space_ctl->tree_lock); 6395 6402 6396 6403 offset = btrfs_find_space_for_alloc(block_group, search_start, 6397 - num_bytes, empty_size); 6404 + num_bytes, empty_size, 6405 + &max_extent_size); 6398 6406 /* 6399 6407 * If we didn't find a chunk, and we haven't failed on this 6400 6408 * block group before, and this block group is in the middle of ··· 6517 6515 ret = 0; 6518 6516 } 6519 6517 out: 6520 
- 6518 + if (ret == -ENOSPC) 6519 + ins->offset = max_extent_size; 6521 6520 return ret; 6522 6521 } 6523 6522 ··· 6576 6573 flags); 6577 6574 6578 6575 if (ret == -ENOSPC) { 6579 - if (!final_tried) { 6580 - num_bytes = num_bytes >> 1; 6576 + if (!final_tried && ins->offset) { 6577 + num_bytes = min(num_bytes >> 1, ins->offset); 6581 6578 num_bytes = round_down(num_bytes, root->sectorsize); 6582 6579 num_bytes = max(num_bytes, min_alloc_size); 6583 6580 if (num_bytes == min_alloc_size)
+5 -3
fs/btrfs/extent_io.c
··· 1481 1481 *end = state->end; 1482 1482 cur_start = state->end + 1; 1483 1483 node = rb_next(node); 1484 - if (!node) 1485 - break; 1486 1484 total_bytes += state->end - state->start + 1; 1487 - if (total_bytes >= max_bytes) 1485 + if (total_bytes >= max_bytes) { 1486 + *end = *start + max_bytes - 1; 1487 + break; 1488 + } 1489 + if (!node) 1488 1490 break; 1489 1491 } 1490 1492 out:
+2 -2
fs/btrfs/file.c
··· 1859 1859 1860 1860 ret = btrfs_log_dentry_safe(trans, root, dentry); 1861 1861 if (ret < 0) { 1862 - mutex_unlock(&inode->i_mutex); 1863 - goto out; 1862 + /* Fallthrough and commit/free transaction. */ 1863 + ret = 1; 1864 1864 } 1865 1865 1866 1866 /* we've logged all the items and now have a consistent
+47 -20
fs/btrfs/free-space-cache.c
··· 1431 1431 ctl->free_space += bytes; 1432 1432 } 1433 1433 1434 + /* 1435 + * If we can not find suitable extent, we will use bytes to record 1436 + * the size of the max extent. 1437 + */ 1434 1438 static int search_bitmap(struct btrfs_free_space_ctl *ctl, 1435 1439 struct btrfs_free_space *bitmap_info, u64 *offset, 1436 1440 u64 *bytes) 1437 1441 { 1438 1442 unsigned long found_bits = 0; 1443 + unsigned long max_bits = 0; 1439 1444 unsigned long bits, i; 1440 1445 unsigned long next_zero; 1446 + unsigned long extent_bits; 1441 1447 1442 1448 i = offset_to_bit(bitmap_info->offset, ctl->unit, 1443 1449 max_t(u64, *offset, bitmap_info->offset)); ··· 1452 1446 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) { 1453 1447 next_zero = find_next_zero_bit(bitmap_info->bitmap, 1454 1448 BITS_PER_BITMAP, i); 1455 - if ((next_zero - i) >= bits) { 1456 - found_bits = next_zero - i; 1449 + extent_bits = next_zero - i; 1450 + if (extent_bits >= bits) { 1451 + found_bits = extent_bits; 1457 1452 break; 1453 + } else if (extent_bits > max_bits) { 1454 + max_bits = extent_bits; 1458 1455 } 1459 1456 i = next_zero; 1460 1457 } ··· 1468 1459 return 0; 1469 1460 } 1470 1461 1462 + *bytes = (u64)(max_bits) * ctl->unit; 1471 1463 return -1; 1472 1464 } 1473 1465 1466 + /* Cache the size of the max extent in bytes */ 1474 1467 static struct btrfs_free_space * 1475 1468 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes, 1476 - unsigned long align) 1469 + unsigned long align, u64 *max_extent_size) 1477 1470 { 1478 1471 struct btrfs_free_space *entry; 1479 1472 struct rb_node *node; 1480 - u64 ctl_off; 1481 1473 u64 tmp; 1482 1474 u64 align_off; 1483 1475 int ret; 1484 1476 1485 1477 if (!ctl->free_space_offset.rb_node) 1486 - return NULL; 1478 + goto out; 1487 1479 1488 1480 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); 1489 1481 if (!entry) 1490 - return NULL; 1482 + goto out; 1491 1483 1492 1484 for (node = 
&entry->offset_index; node; node = rb_next(node)) { 1493 1485 entry = rb_entry(node, struct btrfs_free_space, offset_index); 1494 - if (entry->bytes < *bytes) 1486 + if (entry->bytes < *bytes) { 1487 + if (entry->bytes > *max_extent_size) 1488 + *max_extent_size = entry->bytes; 1495 1489 continue; 1490 + } 1496 1491 1497 1492 /* make sure the space returned is big enough 1498 1493 * to match our requested alignment 1499 1494 */ 1500 1495 if (*bytes >= align) { 1501 - ctl_off = entry->offset - ctl->start; 1502 - tmp = ctl_off + align - 1;; 1496 + tmp = entry->offset - ctl->start + align - 1; 1503 1497 do_div(tmp, align); 1504 1498 tmp = tmp * align + ctl->start; 1505 1499 align_off = tmp - entry->offset; ··· 1511 1499 tmp = entry->offset; 1512 1500 } 1513 1501 1514 - if (entry->bytes < *bytes + align_off) 1502 + if (entry->bytes < *bytes + align_off) { 1503 + if (entry->bytes > *max_extent_size) 1504 + *max_extent_size = entry->bytes; 1515 1505 continue; 1506 + } 1516 1507 1517 1508 if (entry->bitmap) { 1518 - ret = search_bitmap(ctl, entry, &tmp, bytes); 1509 + u64 size = *bytes; 1510 + 1511 + ret = search_bitmap(ctl, entry, &tmp, &size); 1519 1512 if (!ret) { 1520 1513 *offset = tmp; 1514 + *bytes = size; 1521 1515 return entry; 1516 + } else if (size > *max_extent_size) { 1517 + *max_extent_size = size; 1522 1518 } 1523 1519 continue; 1524 1520 } ··· 1535 1515 *bytes = entry->bytes - align_off; 1536 1516 return entry; 1537 1517 } 1538 - 1518 + out: 1539 1519 return NULL; 1540 1520 } 1541 1521 ··· 2136 2116 } 2137 2117 2138 2118 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 2139 - u64 offset, u64 bytes, u64 empty_size) 2119 + u64 offset, u64 bytes, u64 empty_size, 2120 + u64 *max_extent_size) 2140 2121 { 2141 2122 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2142 2123 struct btrfs_free_space *entry = NULL; ··· 2148 2127 2149 2128 spin_lock(&ctl->tree_lock); 2150 2129 entry = find_free_space(ctl, &offset, 
&bytes_search, 2151 - block_group->full_stripe_len); 2130 + block_group->full_stripe_len, max_extent_size); 2152 2131 if (!entry) 2153 2132 goto out; 2154 2133 ··· 2158 2137 if (!entry->bytes) 2159 2138 free_bitmap(ctl, entry); 2160 2139 } else { 2161 - 2162 2140 unlink_free_space(ctl, entry); 2163 2141 align_gap_len = offset - entry->offset; 2164 2142 align_gap = entry->offset; ··· 2171 2151 else 2172 2152 link_free_space(ctl, entry); 2173 2153 } 2174 - 2175 2154 out: 2176 2155 spin_unlock(&ctl->tree_lock); 2177 2156 ··· 2225 2206 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, 2226 2207 struct btrfs_free_cluster *cluster, 2227 2208 struct btrfs_free_space *entry, 2228 - u64 bytes, u64 min_start) 2209 + u64 bytes, u64 min_start, 2210 + u64 *max_extent_size) 2229 2211 { 2230 2212 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2231 2213 int err; ··· 2238 2218 search_bytes = bytes; 2239 2219 2240 2220 err = search_bitmap(ctl, entry, &search_start, &search_bytes); 2241 - if (err) 2221 + if (err) { 2222 + if (search_bytes > *max_extent_size) 2223 + *max_extent_size = search_bytes; 2242 2224 return 0; 2225 + } 2243 2226 2244 2227 ret = search_start; 2245 2228 __bitmap_clear_bits(ctl, entry, ret, bytes); ··· 2257 2234 */ 2258 2235 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, 2259 2236 struct btrfs_free_cluster *cluster, u64 bytes, 2260 - u64 min_start) 2237 + u64 min_start, u64 *max_extent_size) 2261 2238 { 2262 2239 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2263 2240 struct btrfs_free_space *entry = NULL; ··· 2277 2254 2278 2255 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2279 2256 while(1) { 2257 + if (entry->bytes < bytes && entry->bytes > *max_extent_size) 2258 + *max_extent_size = entry->bytes; 2259 + 2280 2260 if (entry->bytes < bytes || 2281 2261 (!entry->bitmap && entry->offset < min_start)) { 2282 2262 node = rb_next(&entry->offset_index); ··· 
2293 2267 if (entry->bitmap) { 2294 2268 ret = btrfs_alloc_from_bitmap(block_group, 2295 2269 cluster, entry, bytes, 2296 - cluster->window_start); 2270 + cluster->window_start, 2271 + max_extent_size); 2297 2272 if (ret == 0) { 2298 2273 node = rb_next(&entry->offset_index); 2299 2274 if (!node)
+3 -2
fs/btrfs/free-space-cache.h
··· 94 94 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache 95 95 *block_group); 96 96 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 97 - u64 offset, u64 bytes, u64 empty_size); 97 + u64 offset, u64 bytes, u64 empty_size, 98 + u64 *max_extent_size); 98 99 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); 99 100 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, 100 101 u64 bytes); ··· 106 105 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster); 107 106 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, 108 107 struct btrfs_free_cluster *cluster, u64 bytes, 109 - u64 min_start); 108 + u64 min_start, u64 *max_extent_size); 110 109 int btrfs_return_cluster_to_free_space( 111 110 struct btrfs_block_group_cache *block_group, 112 111 struct btrfs_free_cluster *cluster);
+11 -5
fs/btrfs/inode.c
··· 4688 4688 struct btrfs_inode *entry; 4689 4689 struct rb_node **p; 4690 4690 struct rb_node *parent; 4691 + struct rb_node *new = &BTRFS_I(inode)->rb_node; 4691 4692 u64 ino = btrfs_ino(inode); 4692 4693 4693 4694 if (inode_unhashed(inode)) 4694 4695 return; 4695 - again: 4696 4696 parent = NULL; 4697 4697 spin_lock(&root->inode_lock); 4698 4698 p = &root->inode_tree.rb_node; ··· 4707 4707 else { 4708 4708 WARN_ON(!(entry->vfs_inode.i_state & 4709 4709 (I_WILL_FREE | I_FREEING))); 4710 - rb_erase(parent, &root->inode_tree); 4710 + rb_replace_node(parent, new, &root->inode_tree); 4711 4711 RB_CLEAR_NODE(parent); 4712 4712 spin_unlock(&root->inode_lock); 4713 - goto again; 4713 + return; 4714 4714 } 4715 4715 } 4716 - rb_link_node(&BTRFS_I(inode)->rb_node, parent, p); 4717 - rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree); 4716 + rb_link_node(new, parent, p); 4717 + rb_insert_color(new, &root->inode_tree); 4718 4718 spin_unlock(&root->inode_lock); 4719 4719 } 4720 4720 ··· 8216 8216 8217 8217 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 8218 8218 if (unlikely(!work)) { 8219 + if (delay_iput) 8220 + btrfs_add_delayed_iput(inode); 8221 + else 8222 + iput(inode); 8219 8223 ret = -ENOMEM; 8220 8224 goto out; 8221 8225 } ··· 8617 8613 .removexattr = btrfs_removexattr, 8618 8614 .permission = btrfs_permission, 8619 8615 .get_acl = btrfs_get_acl, 8616 + .update_time = btrfs_update_time, 8620 8617 }; 8621 8618 static const struct inode_operations btrfs_dir_ro_inode_operations = { 8622 8619 .lookup = btrfs_lookup, 8623 8620 .permission = btrfs_permission, 8624 8621 .get_acl = btrfs_get_acl, 8622 + .update_time = btrfs_update_time, 8625 8623 }; 8626 8624 8627 8625 static const struct file_operations btrfs_dir_file_operations = {
+47 -33
fs/btrfs/ioctl.c
··· 574 574 if (ret) 575 575 return ret; 576 576 577 - btrfs_wait_ordered_extents(root, 0); 577 + btrfs_wait_ordered_extents(root); 578 578 579 579 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); 580 580 if (!pending_snapshot) ··· 2696 2696 static long btrfs_ioctl_file_extent_same(struct file *file, 2697 2697 void __user *argp) 2698 2698 { 2699 - struct btrfs_ioctl_same_args *args = argp; 2700 - struct btrfs_ioctl_same_args same; 2701 - struct btrfs_ioctl_same_extent_info info; 2699 + struct btrfs_ioctl_same_args tmp; 2700 + struct btrfs_ioctl_same_args *same; 2701 + struct btrfs_ioctl_same_extent_info *info; 2702 2702 struct inode *src = file->f_dentry->d_inode; 2703 2703 struct file *dst_file = NULL; 2704 2704 struct inode *dst; ··· 2706 2706 u64 len; 2707 2707 int i; 2708 2708 int ret; 2709 + unsigned long size; 2709 2710 u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize; 2710 2711 bool is_admin = capable(CAP_SYS_ADMIN); 2711 2712 ··· 2717 2716 if (ret) 2718 2717 return ret; 2719 2718 2720 - if (copy_from_user(&same, 2719 + if (copy_from_user(&tmp, 2721 2720 (struct btrfs_ioctl_same_args __user *)argp, 2722 - sizeof(same))) { 2721 + sizeof(tmp))) { 2723 2722 ret = -EFAULT; 2724 2723 goto out; 2725 2724 } 2726 2725 2727 - off = same.logical_offset; 2728 - len = same.length; 2726 + size = sizeof(tmp) + 2727 + tmp.dest_count * sizeof(struct btrfs_ioctl_same_extent_info); 2728 + 2729 + same = kmalloc(size, GFP_NOFS); 2730 + if (!same) { 2731 + ret = -EFAULT; 2732 + goto out; 2733 + } 2734 + 2735 + if (copy_from_user(same, 2736 + (struct btrfs_ioctl_same_args __user *)argp, size)) { 2737 + ret = -EFAULT; 2738 + goto out; 2739 + } 2740 + 2741 + off = same->logical_offset; 2742 + len = same->length; 2729 2743 2730 2744 /* 2731 2745 * Limit the total length we will dedupe for each operation. 
··· 2768 2752 if (!S_ISREG(src->i_mode)) 2769 2753 goto out; 2770 2754 2755 + /* pre-format output fields to sane values */ 2756 + for (i = 0; i < same->dest_count; i++) { 2757 + same->info[i].bytes_deduped = 0ULL; 2758 + same->info[i].status = 0; 2759 + } 2760 + 2771 2761 ret = 0; 2772 - for (i = 0; i < same.dest_count; i++) { 2773 - if (copy_from_user(&info, &args->info[i], sizeof(info))) { 2774 - ret = -EFAULT; 2775 - goto out; 2776 - } 2762 + for (i = 0; i < same->dest_count; i++) { 2763 + info = &same->info[i]; 2777 2764 2778 - info.bytes_deduped = 0; 2779 - 2780 - dst_file = fget(info.fd); 2765 + dst_file = fget(info->fd); 2781 2766 if (!dst_file) { 2782 - info.status = -EBADF; 2767 + info->status = -EBADF; 2783 2768 goto next; 2784 2769 } 2785 2770 2786 2771 if (!(is_admin || (dst_file->f_mode & FMODE_WRITE))) { 2787 - info.status = -EINVAL; 2772 + info->status = -EINVAL; 2788 2773 goto next; 2789 2774 } 2790 2775 2791 - info.status = -EXDEV; 2776 + info->status = -EXDEV; 2792 2777 if (file->f_path.mnt != dst_file->f_path.mnt) 2793 2778 goto next; 2794 2779 ··· 2798 2781 goto next; 2799 2782 2800 2783 if (S_ISDIR(dst->i_mode)) { 2801 - info.status = -EISDIR; 2784 + info->status = -EISDIR; 2802 2785 goto next; 2803 2786 } 2804 2787 2805 2788 if (!S_ISREG(dst->i_mode)) { 2806 - info.status = -EACCES; 2789 + info->status = -EACCES; 2807 2790 goto next; 2808 2791 } 2809 2792 2810 - info.status = btrfs_extent_same(src, off, len, dst, 2811 - info.logical_offset); 2812 - if (info.status == 0) 2813 - info.bytes_deduped += len; 2793 + info->status = btrfs_extent_same(src, off, len, dst, 2794 + info->logical_offset); 2795 + if (info->status == 0) 2796 + info->bytes_deduped += len; 2814 2797 2815 2798 next: 2816 2799 if (dst_file) 2817 2800 fput(dst_file); 2818 - 2819 - if (__put_user_unaligned(info.status, &args->info[i].status) || 2820 - __put_user_unaligned(info.bytes_deduped, 2821 - &args->info[i].bytes_deduped)) { 2822 - ret = -EFAULT; 2823 - goto out; 2824 - } 
2825 2801 } 2802 + 2803 + ret = copy_to_user(argp, same, size); 2804 + if (ret) 2805 + ret = -EFAULT; 2826 2806 2827 2807 out: 2828 2808 mnt_drop_write_file(file); ··· 3324 3310 } 3325 3311 3326 3312 if (!objectid) 3327 - objectid = root->root_key.objectid; 3313 + objectid = BTRFS_FS_TREE_OBJECTID; 3328 3314 3329 3315 location.objectid = objectid; 3330 3316 location.type = BTRFS_ROOT_ITEM_KEY;
+3 -21
fs/btrfs/ordered-data.c
··· 563 563 * wait for all the ordered extents in a root. This is done when balancing 564 564 * space between drives. 565 565 */ 566 - void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput) 566 + void btrfs_wait_ordered_extents(struct btrfs_root *root) 567 567 { 568 568 struct list_head splice, works; 569 569 struct btrfs_ordered_extent *ordered, *next; 570 - struct inode *inode; 571 570 572 571 INIT_LIST_HEAD(&splice); 573 572 INIT_LIST_HEAD(&works); ··· 579 580 root_extent_list); 580 581 list_move_tail(&ordered->root_extent_list, 581 582 &root->ordered_extents); 582 - /* 583 - * the inode may be getting freed (in sys_unlink path). 584 - */ 585 - inode = igrab(ordered->inode); 586 - if (!inode) { 587 - cond_resched_lock(&root->ordered_extent_lock); 588 - continue; 589 - } 590 - 591 583 atomic_inc(&ordered->refs); 592 584 spin_unlock(&root->ordered_extent_lock); 593 585 ··· 595 605 list_for_each_entry_safe(ordered, next, &works, work_list) { 596 606 list_del_init(&ordered->work_list); 597 607 wait_for_completion(&ordered->completion); 598 - 599 - inode = ordered->inode; 600 608 btrfs_put_ordered_extent(ordered); 601 - if (delay_iput) 602 - btrfs_add_delayed_iput(inode); 603 - else 604 - iput(inode); 605 - 606 609 cond_resched(); 607 610 } 608 611 mutex_unlock(&root->fs_info->ordered_operations_mutex); 609 612 } 610 613 611 - void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info, 612 - int delay_iput) 614 + void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info) 613 615 { 614 616 struct btrfs_root *root; 615 617 struct list_head splice; ··· 619 637 &fs_info->ordered_roots); 620 638 spin_unlock(&fs_info->ordered_root_lock); 621 639 622 - btrfs_wait_ordered_extents(root, delay_iput); 640 + btrfs_wait_ordered_extents(root); 623 641 btrfs_put_fs_root(root); 624 642 625 643 spin_lock(&fs_info->ordered_root_lock);
+2 -3
fs/btrfs/ordered-data.h
··· 195 195 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 196 196 struct btrfs_root *root, 197 197 struct inode *inode); 198 - void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput); 199 - void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info, 200 - int delay_iput); 198 + void btrfs_wait_ordered_extents(struct btrfs_root *root); 199 + void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info); 201 200 void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode); 202 201 void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid); 203 202 void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
+25 -18
fs/btrfs/relocation.c
··· 1548 1548 btrfs_file_extent_other_encoding(leaf, fi)); 1549 1549 1550 1550 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) { 1551 - ret = 1; 1551 + ret = -EINVAL; 1552 1552 goto out; 1553 1553 } 1554 1554 ··· 1579 1579 u64 end; 1580 1580 u32 nritems; 1581 1581 u32 i; 1582 - int ret; 1582 + int ret = 0; 1583 1583 int first = 1; 1584 1584 int dirty = 0; 1585 1585 ··· 1642 1642 1643 1643 ret = get_new_location(rc->data_inode, &new_bytenr, 1644 1644 bytenr, num_bytes); 1645 - if (ret > 0) { 1646 - WARN_ON(1); 1647 - continue; 1645 + if (ret) { 1646 + /* 1647 + * Don't have to abort since we've not changed anything 1648 + * in the file extent yet. 1649 + */ 1650 + break; 1648 1651 } 1649 - BUG_ON(ret < 0); 1650 1652 1651 1653 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr); 1652 1654 dirty = 1; ··· 1658 1656 num_bytes, parent, 1659 1657 btrfs_header_owner(leaf), 1660 1658 key.objectid, key.offset, 1); 1661 - BUG_ON(ret); 1659 + if (ret) { 1660 + btrfs_abort_transaction(trans, root, ret); 1661 + break; 1662 + } 1662 1663 1663 1664 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1664 1665 parent, btrfs_header_owner(leaf), 1665 1666 key.objectid, key.offset, 1); 1666 - BUG_ON(ret); 1667 + if (ret) { 1668 + btrfs_abort_transaction(trans, root, ret); 1669 + break; 1670 + } 1667 1671 } 1668 1672 if (dirty) 1669 1673 btrfs_mark_buffer_dirty(leaf); 1670 1674 if (inode) 1671 1675 btrfs_add_delayed_iput(inode); 1672 - return 0; 1676 + return ret; 1673 1677 } 1674 1678 1675 1679 static noinline_for_stack ··· 4246 4238 err = ret; 4247 4239 goto out; 4248 4240 } 4249 - btrfs_wait_all_ordered_extents(fs_info, 0); 4241 + btrfs_wait_all_ordered_extents(fs_info); 4250 4242 4251 4243 while (1) { 4252 4244 mutex_lock(&fs_info->cleaner_mutex); ··· 4507 4499 return ret; 4508 4500 } 4509 4501 4510 - void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 4511 - struct btrfs_root *root, struct extent_buffer *buf, 4512 - struct extent_buffer *cow) 4502 + 
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 4503 + struct btrfs_root *root, struct extent_buffer *buf, 4504 + struct extent_buffer *cow) 4513 4505 { 4514 4506 struct reloc_control *rc; 4515 4507 struct backref_node *node; 4516 4508 int first_cow = 0; 4517 4509 int level; 4518 - int ret; 4510 + int ret = 0; 4519 4511 4520 4512 rc = root->fs_info->reloc_ctl; 4521 4513 if (!rc) 4522 - return; 4514 + return 0; 4523 4515 4524 4516 BUG_ON(rc->stage == UPDATE_DATA_PTRS && 4525 4517 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); ··· 4555 4547 rc->nodes_relocated += buf->len; 4556 4548 } 4557 4549 4558 - if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) { 4550 + if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) 4559 4551 ret = replace_file_extents(trans, rc, root, cow); 4560 - BUG_ON(ret); 4561 - } 4552 + return ret; 4562 4553 } 4563 4554 4564 4555 /*
+98 -14
fs/btrfs/scrub.c
··· 158 158 int mirror_num; 159 159 }; 160 160 161 + struct scrub_nocow_inode { 162 + u64 inum; 163 + u64 offset; 164 + u64 root; 165 + struct list_head list; 166 + }; 167 + 161 168 struct scrub_copy_nocow_ctx { 162 169 struct scrub_ctx *sctx; 163 170 u64 logical; 164 171 u64 len; 165 172 int mirror_num; 166 173 u64 physical_for_dev_replace; 174 + struct list_head inodes; 167 175 struct btrfs_work work; 168 176 }; 169 177 ··· 253 245 static int write_page_nocow(struct scrub_ctx *sctx, 254 246 u64 physical_for_dev_replace, struct page *page); 255 247 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, 256 - void *ctx); 248 + struct scrub_copy_nocow_ctx *ctx); 257 249 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, 258 250 int mirror_num, u64 physical_for_dev_replace); 259 251 static void copy_nocow_pages_worker(struct btrfs_work *work); ··· 3134 3126 nocow_ctx->mirror_num = mirror_num; 3135 3127 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace; 3136 3128 nocow_ctx->work.func = copy_nocow_pages_worker; 3129 + INIT_LIST_HEAD(&nocow_ctx->inodes); 3137 3130 btrfs_queue_worker(&fs_info->scrub_nocow_workers, 3138 3131 &nocow_ctx->work); 3139 3132 3140 3133 return 0; 3141 3134 } 3135 + 3136 + static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx) 3137 + { 3138 + struct scrub_copy_nocow_ctx *nocow_ctx = ctx; 3139 + struct scrub_nocow_inode *nocow_inode; 3140 + 3141 + nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS); 3142 + if (!nocow_inode) 3143 + return -ENOMEM; 3144 + nocow_inode->inum = inum; 3145 + nocow_inode->offset = offset; 3146 + nocow_inode->root = root; 3147 + list_add_tail(&nocow_inode->list, &nocow_ctx->inodes); 3148 + return 0; 3149 + } 3150 + 3151 + #define COPY_COMPLETE 1 3142 3152 3143 3153 static void copy_nocow_pages_worker(struct btrfs_work *work) 3144 3154 { ··· 3193 3167 } 3194 3168 3195 3169 ret = iterate_inodes_from_logical(logical, fs_info, path, 3196 - 
copy_nocow_pages_for_inode, 3197 - nocow_ctx); 3170 + record_inode_for_nocow, nocow_ctx); 3198 3171 if (ret != 0 && ret != -ENOENT) { 3199 3172 pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n", 3200 3173 logical, physical_for_dev_replace, len, mirror_num, ··· 3202 3177 goto out; 3203 3178 } 3204 3179 3180 + btrfs_end_transaction(trans, root); 3181 + trans = NULL; 3182 + while (!list_empty(&nocow_ctx->inodes)) { 3183 + struct scrub_nocow_inode *entry; 3184 + entry = list_first_entry(&nocow_ctx->inodes, 3185 + struct scrub_nocow_inode, 3186 + list); 3187 + list_del_init(&entry->list); 3188 + ret = copy_nocow_pages_for_inode(entry->inum, entry->offset, 3189 + entry->root, nocow_ctx); 3190 + kfree(entry); 3191 + if (ret == COPY_COMPLETE) { 3192 + ret = 0; 3193 + break; 3194 + } else if (ret) { 3195 + break; 3196 + } 3197 + } 3205 3198 out: 3199 + while (!list_empty(&nocow_ctx->inodes)) { 3200 + struct scrub_nocow_inode *entry; 3201 + entry = list_first_entry(&nocow_ctx->inodes, 3202 + struct scrub_nocow_inode, 3203 + list); 3204 + list_del_init(&entry->list); 3205 + kfree(entry); 3206 + } 3206 3207 if (trans && !IS_ERR(trans)) 3207 3208 btrfs_end_transaction(trans, root); 3208 3209 if (not_written) ··· 3241 3190 scrub_pending_trans_workers_dec(sctx); 3242 3191 } 3243 3192 3244 - static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx) 3193 + static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, 3194 + struct scrub_copy_nocow_ctx *nocow_ctx) 3245 3195 { 3246 - struct scrub_copy_nocow_ctx *nocow_ctx = ctx; 3247 3196 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; 3248 3197 struct btrfs_key key; 3249 3198 struct inode *inode; 3250 3199 struct page *page; 3251 3200 struct btrfs_root *local_root; 3201 + struct btrfs_ordered_extent *ordered; 3202 + struct extent_map *em; 3203 + struct extent_state *cached_state = NULL; 3204 + struct extent_io_tree *io_tree; 3252 3205 
u64 physical_for_dev_replace; 3253 - u64 len; 3206 + u64 len = nocow_ctx->len; 3207 + u64 lockstart = offset, lockend = offset + len - 1; 3254 3208 unsigned long index; 3255 3209 int srcu_index; 3256 - int ret; 3257 - int err; 3210 + int ret = 0; 3211 + int err = 0; 3258 3212 3259 3213 key.objectid = root; 3260 3214 key.type = BTRFS_ROOT_ITEM_KEY; ··· 3285 3229 mutex_lock(&inode->i_mutex); 3286 3230 inode_dio_wait(inode); 3287 3231 3288 - ret = 0; 3289 3232 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace; 3290 - len = nocow_ctx->len; 3233 + io_tree = &BTRFS_I(inode)->io_tree; 3234 + 3235 + lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state); 3236 + ordered = btrfs_lookup_ordered_range(inode, lockstart, len); 3237 + if (ordered) { 3238 + btrfs_put_ordered_extent(ordered); 3239 + goto out_unlock; 3240 + } 3241 + 3242 + em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0); 3243 + if (IS_ERR(em)) { 3244 + ret = PTR_ERR(em); 3245 + goto out_unlock; 3246 + } 3247 + 3248 + /* 3249 + * This extent does not actually cover the logical extent anymore, 3250 + * move on to the next inode. 3251 + */ 3252 + if (em->block_start > nocow_ctx->logical || 3253 + em->block_start + em->block_len < nocow_ctx->logical + len) { 3254 + free_extent_map(em); 3255 + goto out_unlock; 3256 + } 3257 + free_extent_map(em); 3258 + 3291 3259 while (len >= PAGE_CACHE_SIZE) { 3292 3260 index = offset >> PAGE_CACHE_SHIFT; 3293 3261 again: ··· 3327 3247 goto next_page; 3328 3248 } else { 3329 3249 ClearPageError(page); 3330 - err = extent_read_full_page(&BTRFS_I(inode)-> 3331 - io_tree, 3332 - page, btrfs_get_extent, 3333 - nocow_ctx->mirror_num); 3250 + err = extent_read_full_page_nolock(io_tree, page, 3251 + btrfs_get_extent, 3252 + nocow_ctx->mirror_num); 3334 3253 if (err) { 3335 3254 ret = err; 3336 3255 goto next_page; ··· 3343 3264 * page in the page cache. 
3344 3265 */ 3345 3266 if (page->mapping != inode->i_mapping) { 3267 + unlock_page(page); 3346 3268 page_cache_release(page); 3347 3269 goto again; 3348 3270 } ··· 3367 3287 physical_for_dev_replace += PAGE_CACHE_SIZE; 3368 3288 len -= PAGE_CACHE_SIZE; 3369 3289 } 3290 + ret = COPY_COMPLETE; 3291 + out_unlock: 3292 + unlock_extent_cached(io_tree, lockstart, lockend, &cached_state, 3293 + GFP_NOFS); 3370 3294 out: 3371 3295 mutex_unlock(&inode->i_mutex); 3372 3296 iput(inode);
+20 -1
fs/btrfs/super.c
··· 921 921 return 0; 922 922 } 923 923 924 - btrfs_wait_all_ordered_extents(fs_info, 1); 924 + btrfs_wait_all_ordered_extents(fs_info); 925 925 926 926 trans = btrfs_attach_transaction_barrier(root); 927 927 if (IS_ERR(trans)) { ··· 1340 1340 if (ret) 1341 1341 goto restore; 1342 1342 } else { 1343 + if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) { 1344 + btrfs_err(fs_info, 1345 + "Remounting read-write after error is not allowed\n"); 1346 + ret = -EINVAL; 1347 + goto restore; 1348 + } 1343 1349 if (fs_info->fs_devices->rw_devices == 0) { 1344 1350 ret = -EACCES; 1345 1351 goto restore; ··· 1382 1376 if (ret) { 1383 1377 pr_warn("btrfs: failed to resume dev_replace\n"); 1384 1378 goto restore; 1379 + } 1380 + 1381 + if (!fs_info->uuid_root) { 1382 + pr_info("btrfs: creating UUID tree\n"); 1383 + ret = btrfs_create_uuid_tree(fs_info); 1384 + if (ret) { 1385 + pr_warn("btrfs: failed to create the uuid tree" 1386 + "%d\n", ret); 1387 + goto restore; 1388 + } 1385 1389 } 1386 1390 sb->s_flags &= ~MS_RDONLY; 1387 1391 } ··· 1777 1761 printk(KERN_INFO "Btrfs loaded" 1778 1762 #ifdef CONFIG_BTRFS_DEBUG 1779 1763 ", debug=on" 1764 + #endif 1765 + #ifdef CONFIG_BTRFS_ASSERT 1766 + ", assert=on" 1780 1767 #endif 1781 1768 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1782 1769 ", integrity-checker=on"
+1 -1
fs/btrfs/transaction.c
··· 1603 1603 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) 1604 1604 { 1605 1605 if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT)) 1606 - btrfs_wait_all_ordered_extents(fs_info, 1); 1606 + btrfs_wait_all_ordered_extents(fs_info); 1607 1607 } 1608 1608 1609 1609 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+48 -4
fs/btrfs/tree-log.c
··· 93 93 */ 94 94 #define LOG_WALK_PIN_ONLY 0 95 95 #define LOG_WALK_REPLAY_INODES 1 96 - #define LOG_WALK_REPLAY_ALL 2 96 + #define LOG_WALK_REPLAY_DIR_INDEX 2 97 + #define LOG_WALK_REPLAY_ALL 3 97 98 98 99 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 99 100 struct btrfs_root *root, struct inode *inode, ··· 394 393 if (inode_item) { 395 394 struct btrfs_inode_item *item; 396 395 u64 nbytes; 396 + u32 mode; 397 397 398 398 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 399 399 struct btrfs_inode_item); ··· 402 400 item = btrfs_item_ptr(eb, slot, 403 401 struct btrfs_inode_item); 404 402 btrfs_set_inode_nbytes(eb, item, nbytes); 403 + 404 + /* 405 + * If this is a directory we need to reset the i_size to 406 + * 0 so that we can set it up properly when replaying 407 + * the rest of the items in this log. 408 + */ 409 + mode = btrfs_inode_mode(eb, item); 410 + if (S_ISDIR(mode)) 411 + btrfs_set_inode_size(eb, item, 0); 405 412 } 406 413 } else if (inode_item) { 407 414 struct btrfs_inode_item *item; 415 + u32 mode; 408 416 409 417 /* 410 418 * New inode, set nbytes to 0 so that the nbytes comes out ··· 422 410 */ 423 411 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 424 412 btrfs_set_inode_nbytes(eb, item, 0); 413 + 414 + /* 415 + * If this is a directory we need to reset the i_size to 0 so 416 + * that we can set it up properly when replaying the rest of 417 + * the items in this log. 
418 + */ 419 + mode = btrfs_inode_mode(eb, item); 420 + if (S_ISDIR(mode)) 421 + btrfs_set_inode_size(eb, item, 0); 425 422 } 426 423 insert: 427 424 btrfs_release_path(path); ··· 1517 1496 iput(inode); 1518 1497 return -EIO; 1519 1498 } 1499 + 1520 1500 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); 1521 1501 1522 1502 /* FIXME, put inode into FIXUP list */ ··· 1556 1534 u8 log_type; 1557 1535 int exists; 1558 1536 int ret = 0; 1537 + bool update_size = (key->type == BTRFS_DIR_INDEX_KEY); 1559 1538 1560 1539 dir = read_one_inode(root, key->objectid); 1561 1540 if (!dir) ··· 1627 1604 goto insert; 1628 1605 out: 1629 1606 btrfs_release_path(path); 1607 + if (!ret && update_size) { 1608 + btrfs_i_size_write(dir, dir->i_size + name_len * 2); 1609 + ret = btrfs_update_inode(trans, root, dir); 1610 + } 1630 1611 kfree(name); 1631 1612 iput(dir); 1632 1613 return ret; ··· 1641 1614 name, name_len, log_type, &log_key); 1642 1615 if (ret && ret != -ENOENT) 1643 1616 goto out; 1617 + update_size = false; 1644 1618 ret = 0; 1645 1619 goto out; 1646 1620 } ··· 2055 2027 if (ret) 2056 2028 break; 2057 2029 } 2030 + 2031 + if (key.type == BTRFS_DIR_INDEX_KEY && 2032 + wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2033 + ret = replay_one_dir_item(wc->trans, root, path, 2034 + eb, i, &key); 2035 + if (ret) 2036 + break; 2037 + } 2038 + 2058 2039 if (wc->stage < LOG_WALK_REPLAY_ALL) 2059 2040 continue; 2060 2041 ··· 2085 2048 eb, i, &key); 2086 2049 if (ret) 2087 2050 break; 2088 - } else if (key.type == BTRFS_DIR_ITEM_KEY || 2089 - key.type == BTRFS_DIR_INDEX_KEY) { 2051 + } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2090 2052 ret = replay_one_dir_item(wc->trans, root, path, 2091 2053 eb, i, &key); 2092 2054 if (ret) ··· 3841 3805 int ret = 0; 3842 3806 struct btrfs_root *root; 3843 3807 struct dentry *old_parent = NULL; 3808 + struct inode *orig_inode = inode; 3844 3809 3845 3810 /* 3846 3811 * for regular files, if its inode is already on disk, we don't ··· 
3861 3824 } 3862 3825 3863 3826 while (1) { 3864 - BTRFS_I(inode)->logged_trans = trans->transid; 3827 + /* 3828 + * If we are logging a directory then we start with our inode, 3829 + * not our parents inode, so we need to skipp setting the 3830 + * logged_trans so that further down in the log code we don't 3831 + * think this inode has already been logged. 3832 + */ 3833 + if (inode != orig_inode) 3834 + BTRFS_I(inode)->logged_trans = trans->transid; 3865 3835 smp_mb(); 3866 3836 3867 3837 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
+4 -3
fs/btrfs/volumes.c
··· 796 796 fs_devices->rotating = 1; 797 797 798 798 fs_devices->open_devices++; 799 - if (device->writeable && !device->is_tgtdev_for_dev_replace) { 799 + if (device->writeable && 800 + device->devid != BTRFS_DEV_REPLACE_DEVID) { 800 801 fs_devices->rw_devices++; 801 802 list_add(&device->dev_alloc_list, 802 803 &fs_devices->alloc_list); ··· 912 911 if (disk_super->label[0]) { 913 912 if (disk_super->label[BTRFS_LABEL_SIZE - 1]) 914 913 disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0'; 915 - printk(KERN_INFO "device label %s ", disk_super->label); 914 + printk(KERN_INFO "btrfs: device label %s ", disk_super->label); 916 915 } else { 917 - printk(KERN_INFO "device fsid %pU ", disk_super->fsid); 916 + printk(KERN_INFO "btrfs: device fsid %pU ", disk_super->fsid); 918 917 } 919 918 920 919 printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
+1 -1
fs/cachefiles/namei.c
··· 56 56 object->fscache.cookie->parent, 57 57 object->fscache.cookie->netfs_data, 58 58 object->fscache.cookie->flags); 59 - if (keybuf) 59 + if (keybuf && cookie->def) 60 60 keylen = cookie->def->get_key(cookie->netfs_data, keybuf, 61 61 CACHEFILES_KEYBUF_SIZE); 62 62 else
+15 -14
fs/cachefiles/xattr.c
··· 162 162 int cachefiles_check_auxdata(struct cachefiles_object *object) 163 163 { 164 164 struct cachefiles_xattr *auxbuf; 165 + enum fscache_checkaux validity; 165 166 struct dentry *dentry = object->dentry; 166 - unsigned int dlen; 167 + ssize_t xlen; 167 168 int ret; 168 169 169 170 ASSERT(dentry); ··· 175 174 if (!auxbuf) 176 175 return -ENOMEM; 177 176 178 - auxbuf->len = vfs_getxattr(dentry, cachefiles_xattr_cache, 179 - &auxbuf->type, 512 + 1); 180 - if (auxbuf->len < 1) 181 - return -ESTALE; 177 + xlen = vfs_getxattr(dentry, cachefiles_xattr_cache, 178 + &auxbuf->type, 512 + 1); 179 + ret = -ESTALE; 180 + if (xlen < 1 || 181 + auxbuf->type != object->fscache.cookie->def->type) 182 + goto error; 182 183 183 - if (auxbuf->type != object->fscache.cookie->def->type) 184 - return -ESTALE; 184 + xlen--; 185 + validity = fscache_check_aux(&object->fscache, &auxbuf->data, xlen); 186 + if (validity != FSCACHE_CHECKAUX_OKAY) 187 + goto error; 185 188 186 - dlen = auxbuf->len - 1; 187 - ret = fscache_check_aux(&object->fscache, &auxbuf->data, dlen); 188 - 189 + ret = 0; 190 + error: 189 191 kfree(auxbuf); 190 - if (ret != FSCACHE_CHECKAUX_OKAY) 191 - return -ESTALE; 192 - 193 - return 0; 192 + return ret; 194 193 } 195 194 196 195 /*
+1
fs/cifs/dir.c
··· 500 500 if (server->ops->close) 501 501 server->ops->close(xid, tcon, &fid); 502 502 cifs_del_pending_open(&open); 503 + fput(file); 503 504 rc = -ENOMEM; 504 505 } 505 506
+2 -1
fs/fscache/cookie.c
··· 586 586 587 587 fscache_operation_init(op, NULL, NULL); 588 588 op->flags = FSCACHE_OP_MYTHREAD | 589 - (1 << FSCACHE_OP_WAITING); 589 + (1 << FSCACHE_OP_WAITING) | 590 + (1 << FSCACHE_OP_UNUSE_COOKIE); 590 591 591 592 spin_lock(&cookie->lock); 592 593
+3 -1
fs/gfs2/inode.c
··· 694 694 695 695 mark_inode_dirty(inode); 696 696 d_instantiate(dentry, inode); 697 - if (file) 697 + if (file) { 698 + *opened |= FILE_CREATED; 698 699 error = finish_open(file, dentry, gfs2_open_common, opened); 700 + } 699 701 gfs2_glock_dq_uninit(ghs); 700 702 gfs2_glock_dq_uninit(ghs + 1); 701 703 return error;
+22 -12
fs/namei.c
··· 2656 2656 int acc_mode; 2657 2657 int create_error = 0; 2658 2658 struct dentry *const DENTRY_NOT_SET = (void *) -1UL; 2659 + bool excl; 2659 2660 2660 2661 BUG_ON(dentry->d_inode); 2661 2662 ··· 2670 2669 if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) 2671 2670 mode &= ~current_umask(); 2672 2671 2673 - if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) { 2672 + excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT); 2673 + if (excl) 2674 2674 open_flag &= ~O_TRUNC; 2675 - *opened |= FILE_CREATED; 2676 - } 2677 2675 2678 2676 /* 2679 2677 * Checking write permission is tricky, bacuse we don't know if we are ··· 2725 2725 goto out; 2726 2726 } 2727 2727 2728 - acc_mode = op->acc_mode; 2729 - if (*opened & FILE_CREATED) { 2730 - fsnotify_create(dir, dentry); 2731 - acc_mode = MAY_OPEN; 2732 - } 2733 - 2734 2728 if (error) { /* returned 1, that is */ 2735 2729 if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) { 2736 2730 error = -EIO; ··· 2734 2740 dput(dentry); 2735 2741 dentry = file->f_path.dentry; 2736 2742 } 2737 - if (create_error && dentry->d_inode == NULL) { 2738 - error = create_error; 2739 - goto out; 2743 + if (*opened & FILE_CREATED) 2744 + fsnotify_create(dir, dentry); 2745 + if (!dentry->d_inode) { 2746 + WARN_ON(*opened & FILE_CREATED); 2747 + if (create_error) { 2748 + error = create_error; 2749 + goto out; 2750 + } 2751 + } else { 2752 + if (excl && !(*opened & FILE_CREATED)) { 2753 + error = -EEXIST; 2754 + goto out; 2755 + } 2740 2756 } 2741 2757 goto looked_up; 2742 2758 } ··· 2755 2751 * We didn't have the inode before the open, so check open permission 2756 2752 * here. 2757 2753 */ 2754 + acc_mode = op->acc_mode; 2755 + if (*opened & FILE_CREATED) { 2756 + WARN_ON(!(open_flag & O_CREAT)); 2757 + fsnotify_create(dir, dentry); 2758 + acc_mode = MAY_OPEN; 2759 + } 2758 2760 error = may_open(&file->f_path, acc_mode, open_flag); 2759 2761 if (error) 2760 2762 fput(file);
+3
fs/nfs/dir.c
··· 1392 1392 { 1393 1393 int err; 1394 1394 1395 + if ((open_flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) 1396 + *opened |= FILE_CREATED; 1397 + 1395 1398 err = finish_open(file, dentry, do_open, opened); 1396 1399 if (err) 1397 1400 goto out;
+18 -3
fs/open.c
··· 744 744 745 745 /** 746 746 * finish_open - finish opening a file 747 - * @od: opaque open data 747 + * @file: file pointer 748 748 * @dentry: pointer to dentry 749 749 * @open: open callback 750 + * @opened: state of open 750 751 * 751 752 * This can be used to finish opening a file passed to i_op->atomic_open(). 752 753 * 753 754 * If the open callback is set to NULL, then the standard f_op->open() 754 755 * filesystem callback is substituted. 756 + * 757 + * NB: the dentry reference is _not_ consumed. If, for example, the dentry is 758 + * the return value of d_splice_alias(), then the caller needs to perform dput() 759 + * on it after finish_open(). 760 + * 761 + * On successful return @file is a fully instantiated open file. After this, if 762 + * an error occurs in ->atomic_open(), it needs to clean up with fput(). 763 + * 764 + * Returns zero on success or -errno if the open failed. 755 765 */ 756 766 int finish_open(struct file *file, struct dentry *dentry, 757 767 int (*open)(struct inode *, struct file *), ··· 782 772 /** 783 773 * finish_no_open - finish ->atomic_open() without opening the file 784 774 * 785 - * @od: opaque open data 775 + * @file: file pointer 786 776 * @dentry: dentry or NULL (as returned from ->lookup()) 787 777 * 788 778 * This can be used to set the result of a successful lookup in ->atomic_open(). 789 - * The filesystem's atomic_open() method shall return NULL after calling this. 779 + * 780 + * NB: unlike finish_open() this function does consume the dentry reference and 781 + * the caller need not dput() it. 782 + * 783 + * Returns "1" which must be the return value of ->atomic_open() after having 784 + * called this function. 790 785 */ 791 786 int finish_no_open(struct file *file, struct dentry *dentry) 792 787 {
+23 -6
fs/pstore/platform.c
··· 168 168 int err, ret; 169 169 170 170 ret = -EIO; 171 - err = zlib_inflateInit(&stream); 171 + err = zlib_inflateInit2(&stream, WINDOW_BITS); 172 172 if (err != Z_OK) 173 173 goto error; 174 174 ··· 195 195 static void allocate_buf_for_compression(void) 196 196 { 197 197 size_t size; 198 + size_t cmpr; 198 199 199 - big_oops_buf_sz = (psinfo->bufsize * 100) / 45; 200 + switch (psinfo->bufsize) { 201 + /* buffer range for efivars */ 202 + case 1000 ... 2000: 203 + cmpr = 56; 204 + break; 205 + case 2001 ... 3000: 206 + cmpr = 54; 207 + break; 208 + case 3001 ... 3999: 209 + cmpr = 52; 210 + break; 211 + /* buffer range for nvram, erst */ 212 + case 4000 ... 10000: 213 + cmpr = 45; 214 + break; 215 + default: 216 + cmpr = 60; 217 + break; 218 + } 219 + 220 + big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr; 200 221 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 201 222 if (big_oops_buf) { 202 223 size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL), ··· 316 295 compressed = true; 317 296 total_len = zipped_len; 318 297 } else { 319 - pr_err("pstore: compression failed for Part %d" 320 - " returned %d\n", part, zipped_len); 321 - pr_err("pstore: Capture uncompressed" 322 - " oops/panic report of Part %d\n", part); 323 298 compressed = false; 324 299 total_len = copy_kmsg_to_buffer(hsize, len); 325 300 }
+3 -4
include/drm/drmP.h
··· 1322 1322 extern int drm_rmctx(struct drm_device *dev, void *data, 1323 1323 struct drm_file *file_priv); 1324 1324 1325 - extern void drm_legacy_ctxbitmap_init(struct drm_device *dev); 1326 - extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); 1327 - extern void drm_legacy_ctxbitmap_release(struct drm_device *dev, 1328 - struct drm_file *file_priv); 1325 + extern int drm_ctxbitmap_init(struct drm_device *dev); 1326 + extern void drm_ctxbitmap_cleanup(struct drm_device *dev); 1327 + extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); 1329 1328 1330 1329 extern int drm_setsareactx(struct drm_device *dev, void *data, 1331 1330 struct drm_file *file_priv);
+3
include/drm/drm_pciids.h
··· 12 12 {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 13 13 {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 14 14 {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 15 + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 15 16 {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 16 17 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 17 18 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 19 + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 18 20 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 19 21 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 22 + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 20 23 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 21 24 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 22 25 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+11
include/linux/blkdev.h
··· 862 862 return blk_queue_get_max_sectors(q, rq->cmd_flags); 863 863 } 864 864 865 + static inline unsigned int blk_rq_count_bios(struct request *rq) 866 + { 867 + unsigned int nr_bios = 0; 868 + struct bio *bio; 869 + 870 + __rq_for_each_bio(bio, rq) 871 + nr_bios++; 872 + 873 + return nr_bios; 874 + } 875 + 865 876 /* 866 877 * Request issue related functions. 867 878 */
+2
include/linux/ceph/osd_client.h
··· 335 335 struct ceph_osd_request *req); 336 336 extern void ceph_osdc_sync(struct ceph_osd_client *osdc); 337 337 338 + extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc); 339 + 338 340 extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, 339 341 struct ceph_vino vino, 340 342 struct ceph_file_layout *layout,
+4
include/linux/hid.h
··· 756 756 struct hid_device *hid_allocate_device(void); 757 757 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id); 758 758 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); 759 + struct hid_report *hid_validate_values(struct hid_device *hid, 760 + unsigned int type, unsigned int id, 761 + unsigned int field_index, 762 + unsigned int report_counts); 759 763 int hid_open_report(struct hid_device *device); 760 764 int hid_check_keys_pressed(struct hid_device *hid); 761 765 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
+1
include/linux/kvm_host.h
··· 533 533 534 534 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); 535 535 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 536 + unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); 536 537 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); 537 538 void kvm_release_page_clean(struct page *page); 538 539 void kvm_release_page_dirty(struct page *page);
+4 -4
include/linux/netdevice.h
··· 950 950 * multiple net devices on single physical port. 951 951 * 952 952 * void (*ndo_add_vxlan_port)(struct net_device *dev, 953 - * sa_family_t sa_family, __u16 port); 953 + * sa_family_t sa_family, __be16 port); 954 954 * Called by vxlan to notiy a driver about the UDP port and socket 955 955 * address family that vxlan is listnening to. It is called only when 956 956 * a new port starts listening. The operation is protected by the 957 957 * vxlan_net->sock_lock. 958 958 * 959 959 * void (*ndo_del_vxlan_port)(struct net_device *dev, 960 - * sa_family_t sa_family, __u16 port); 960 + * sa_family_t sa_family, __be16 port); 961 961 * Called by vxlan to notify the driver about a UDP port and socket 962 962 * address family that vxlan is not listening to anymore. The operation 963 963 * is protected by the vxlan_net->sock_lock. ··· 1093 1093 struct netdev_phys_port_id *ppid); 1094 1094 void (*ndo_add_vxlan_port)(struct net_device *dev, 1095 1095 sa_family_t sa_family, 1096 - __u16 port); 1096 + __be16 port); 1097 1097 void (*ndo_del_vxlan_port)(struct net_device *dev, 1098 1098 sa_family_t sa_family, 1099 - __u16 port); 1099 + __be16 port); 1100 1100 }; 1101 1101 1102 1102 /*
+4 -2
include/linux/netfilter/ipset/ip_set.h
··· 296 296 297 297 /* Match elements marked with nomatch */ 298 298 static inline bool 299 - ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt) 299 + ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set) 300 300 { 301 301 return adt == IPSET_TEST && 302 - ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH); 302 + (set->type->features & IPSET_TYPE_NOMATCH) && 303 + ((flags >> 16) & IPSET_FLAG_NOMATCH) && 304 + (ret > 0 || ret == -ENOTEMPTY); 303 305 } 304 306 305 307 /* Check the NLA_F_NET_BYTEORDER flag */
+1
include/linux/timex.h
··· 141 141 extern void hardpps(const struct timespec *, const struct timespec *); 142 142 143 143 int read_current_timer(unsigned long *timer_val); 144 + void ntp_notify_cmos_timer(void); 144 145 145 146 /* The clock frequency of the i8253/i8254 PIT */ 146 147 #define PIT_TICK_RATE 1193182ul
+8 -4
include/net/ip.h
··· 264 264 265 265 extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); 266 266 267 - static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk) 267 + static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk) 268 268 { 269 - if (iph->frag_off & htons(IP_DF)) { 269 + struct iphdr *iph = ip_hdr(skb); 270 + 271 + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { 270 272 /* This is only to work around buggy Windows95/2000 271 273 * VJ compression implementations. If the ID field 272 274 * does not change, they drop every other packet in ··· 280 278 __ip_select_ident(iph, dst, 0); 281 279 } 282 280 283 - static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more) 281 + static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more) 284 282 { 285 - if (iph->frag_off & htons(IP_DF)) { 283 + struct iphdr *iph = ip_hdr(skb); 284 + 285 + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { 286 286 if (sk && inet_sk(sk)->inet_daddr) { 287 287 iph->id = htons(inet_sk(sk)->inet_id); 288 288 inet_sk(sk)->inet_id += 1 + more;
+1 -1
include/net/netfilter/nf_conntrack_extend.h
··· 86 86 static inline void nf_ct_ext_free(struct nf_conn *ct) 87 87 { 88 88 if (ct->ext) 89 - kfree(ct->ext); 89 + kfree_rcu(ct->ext, rcu); 90 90 } 91 91 92 92 /* Add this type, returns pointer to data or NULL. */
+4 -2
include/trace/events/block.h
··· 618 618 __field( unsigned int, nr_sector ) 619 619 __field( dev_t, old_dev ) 620 620 __field( sector_t, old_sector ) 621 + __field( unsigned int, nr_bios ) 621 622 __array( char, rwbs, RWBS_LEN) 622 623 ), 623 624 ··· 628 627 __entry->nr_sector = blk_rq_sectors(rq); 629 628 __entry->old_dev = dev; 630 629 __entry->old_sector = from; 630 + __entry->nr_bios = blk_rq_count_bios(rq); 631 631 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 632 632 ), 633 633 634 - TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", 634 + TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u", 635 635 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, 636 636 (unsigned long long)__entry->sector, 637 637 __entry->nr_sector, 638 638 MAJOR(__entry->old_dev), MINOR(__entry->old_dev), 639 - (unsigned long long)__entry->old_sector) 639 + (unsigned long long)__entry->old_sector, __entry->nr_bios) 640 640 ); 641 641 642 642 #endif /* _TRACE_BLOCK_H */
+1
include/trace/events/btrfs.h
··· 42 42 { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \ 43 43 { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \ 44 44 { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \ 45 + { BTRFS_UUID_TREE_OBJECTID, "UUID_RELOC" }, \ 45 46 { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }) 46 47 47 48 #define show_root_type(obj) \
+1 -1
include/uapi/linux/perf_event.h
··· 324 324 #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) 325 325 #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) 326 326 #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) 327 - #define PERF_EVENT_IOC_ID _IOR('$', 7, u64 *) 327 + #define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) 328 328 329 329 enum perf_event_ioc_flags { 330 330 PERF_IOC_FLAG_GROUP = 1U << 0,
+3 -3
kernel/sched/debug.c
··· 124 124 SEQ_printf(m, " "); 125 125 126 126 SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", 127 - p->comm, p->pid, 127 + p->comm, task_pid_nr(p), 128 128 SPLIT_NS(p->se.vruntime), 129 129 (long long)(p->nvcsw + p->nivcsw), 130 130 p->prio); ··· 289 289 P(nr_load_updates); 290 290 P(nr_uninterruptible); 291 291 PN(next_balance); 292 - P(curr->pid); 292 + SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr))); 293 293 PN(clock); 294 294 P(cpu_load[0]); 295 295 P(cpu_load[1]); ··· 492 492 { 493 493 unsigned long nr_switches; 494 494 495 - SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, 495 + SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p), 496 496 get_nr_threads(p)); 497 497 SEQ_printf(m, 498 498 "---------------------------------------------------------"
+9 -5
kernel/sched/fair.c
··· 5928 5928 cfs_rq = task_cfs_rq(current); 5929 5929 curr = cfs_rq->curr; 5930 5930 5931 - if (unlikely(task_cpu(p) != this_cpu)) { 5932 - rcu_read_lock(); 5933 - __set_task_cpu(p, this_cpu); 5934 - rcu_read_unlock(); 5935 - } 5931 + /* 5932 + * Not only the cpu but also the task_group of the parent might have 5933 + * been changed after parent->se.parent,cfs_rq were copied to 5934 + * child->se.parent,cfs_rq. So call __set_task_cpu() to make those 5935 + * of child point to valid ones. 5936 + */ 5937 + rcu_read_lock(); 5938 + __set_task_cpu(p, this_cpu); 5939 + rcu_read_unlock(); 5936 5940 5937 5941 update_curr(cfs_rq); 5938 5942
+3 -2
kernel/sched/stats.h
··· 104 104 } 105 105 106 106 /* 107 - * Called when a process ceases being the active-running process, either 108 - * voluntarily or involuntarily. Now we can calculate how long we ran. 107 + * Called when a process ceases being the active-running process involuntarily 108 + * due, typically, to expiring its time slice (this may also be called when 109 + * switching to the idle task). Now we can calculate how long we ran. 109 110 * Also, if the process is still in the TASK_RUNNING state, call 110 111 * sched_info_queued() to mark that it has now again started waiting on 111 112 * the runqueue.
+2 -4
kernel/time/ntp.c
··· 516 516 schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next)); 517 517 } 518 518 519 - static void notify_cmos_timer(void) 519 + void ntp_notify_cmos_timer(void) 520 520 { 521 521 schedule_delayed_work(&sync_cmos_work, 0); 522 522 } 523 523 524 524 #else 525 - static inline void notify_cmos_timer(void) { } 525 + void ntp_notify_cmos_timer(void) { } 526 526 #endif 527 527 528 528 ··· 686 686 txc->time.tv_usec = ts->tv_nsec; 687 687 if (!(time_status & STA_NANO)) 688 688 txc->time.tv_usec /= NSEC_PER_USEC; 689 - 690 - notify_cmos_timer(); 691 689 692 690 return result; 693 691 }
+2
kernel/time/timekeeping.c
··· 1703 1703 write_seqcount_end(&timekeeper_seq); 1704 1704 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1705 1705 1706 + ntp_notify_cmos_timer(); 1707 + 1706 1708 return ret; 1707 1709 } 1708 1710
+2 -2
lib/lockref.c
··· 14 14 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ 15 15 struct lockref new = old, prev = old; \ 16 16 CODE \ 17 - old.lock_count = cmpxchg(&lockref->lock_count, \ 18 - old.lock_count, new.lock_count); \ 17 + old.lock_count = cmpxchg64(&lockref->lock_count, \ 18 + old.lock_count, new.lock_count); \ 19 19 if (likely(old.lock_count == prev.lock_count)) { \ 20 20 SUCCESS; \ 21 21 } \
+2
net/batman-adv/soft-interface.c
··· 168 168 case ETH_P_8021Q: 169 169 vhdr = (struct vlan_ethhdr *)skb->data; 170 170 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 171 + vid |= BATADV_VLAN_HAS_TAG; 171 172 172 173 if (vhdr->h_vlan_encapsulated_proto != ethertype) 173 174 break; ··· 332 331 case ETH_P_8021Q: 333 332 vhdr = (struct vlan_ethhdr *)skb->data; 334 333 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 334 + vid |= BATADV_VLAN_HAS_TAG; 335 335 336 336 if (vhdr->h_vlan_encapsulated_proto != ethertype) 337 337 break;
+2 -2
net/bridge/br_netlink.c
··· 207 207 struct net_device *dev, u32 filter_mask) 208 208 { 209 209 int err = 0; 210 - struct net_bridge_port *port = br_port_get_rcu(dev); 210 + struct net_bridge_port *port = br_port_get_rtnl(dev); 211 211 212 212 /* not a bridge port and */ 213 213 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN)) ··· 451 451 struct net_port_vlans *pv; 452 452 453 453 if (br_port_exists(dev)) 454 - pv = nbp_get_vlan_info(br_port_get_rcu(dev)); 454 + pv = nbp_get_vlan_info(br_port_get_rtnl(dev)); 455 455 else if (dev->priv_flags & IFF_EBRIDGE) 456 456 pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev)); 457 457 else
+3 -5
net/bridge/br_private.h
··· 202 202 203 203 static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) 204 204 { 205 - struct net_bridge_port *port = 206 - rcu_dereference_rtnl(dev->rx_handler_data); 207 - 208 - return br_port_exists(dev) ? port : NULL; 205 + return rcu_dereference(dev->rx_handler_data); 209 206 } 210 207 211 - static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev) 208 + static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *dev) 212 209 { 213 210 return br_port_exists(dev) ? 214 211 rtnl_dereference(dev->rx_handler_data) : NULL; ··· 743 746 extern void br_init_port(struct net_bridge_port *p); 744 747 extern void br_become_designated_port(struct net_bridge_port *p); 745 748 749 + extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t); 746 750 extern int br_set_forward_delay(struct net_bridge *br, unsigned long x); 747 751 extern int br_set_hello_time(struct net_bridge *br, unsigned long x); 748 752 extern int br_set_max_age(struct net_bridge *br, unsigned long x);
+19 -10
net/bridge/br_stp.c
··· 209 209 p->designated_age = jiffies - bpdu->message_age; 210 210 211 211 mod_timer(&p->message_age_timer, jiffies 212 - + (p->br->max_age - bpdu->message_age)); 212 + + (bpdu->max_age - bpdu->message_age)); 213 213 } 214 214 215 215 /* called under bridge lock */ ··· 544 544 545 545 } 546 546 547 - int br_set_forward_delay(struct net_bridge *br, unsigned long val) 547 + void __br_set_forward_delay(struct net_bridge *br, unsigned long t) 548 548 { 549 - unsigned long t = clock_t_to_jiffies(val); 550 - 551 - if (br->stp_enabled != BR_NO_STP && 552 - (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)) 553 - return -ERANGE; 554 - 555 - spin_lock_bh(&br->lock); 556 549 br->bridge_forward_delay = t; 557 550 if (br_is_root_bridge(br)) 558 551 br->forward_delay = br->bridge_forward_delay; 552 + } 553 + 554 + int br_set_forward_delay(struct net_bridge *br, unsigned long val) 555 + { 556 + unsigned long t = clock_t_to_jiffies(val); 557 + int err = -ERANGE; 558 + 559 + spin_lock_bh(&br->lock); 560 + if (br->stp_enabled != BR_NO_STP && 561 + (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)) 562 + goto unlock; 563 + 564 + __br_set_forward_delay(br, t); 565 + err = 0; 566 + 567 + unlock: 559 568 spin_unlock_bh(&br->lock); 560 - return 0; 569 + return err; 561 570 }
+10 -2
net/bridge/br_stp_if.c
··· 129 129 char *envp[] = { NULL }; 130 130 131 131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 132 + 133 + spin_lock_bh(&br->lock); 134 + 135 + if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) 136 + __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); 137 + else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) 138 + __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); 139 + 132 140 if (r == 0) { 133 141 br->stp_enabled = BR_USER_STP; 134 142 br_debug(br, "userspace STP started\n"); ··· 145 137 br_debug(br, "using kernel STP\n"); 146 138 147 139 /* To start timers on any ports left in blocking */ 148 - spin_lock_bh(&br->lock); 149 140 br_port_state_selection(br); 150 - spin_unlock_bh(&br->lock); 151 141 } 142 + 143 + spin_unlock_bh(&br->lock); 152 144 } 153 145 154 146 static void br_stp_stop(struct net_bridge *br)
+11
net/ceph/osd_client.c
··· 2216 2216 EXPORT_SYMBOL(ceph_osdc_sync); 2217 2217 2218 2218 /* 2219 + * Call all pending notify callbacks - for use after a watch is 2220 + * unregistered, to make sure no more callbacks for it will be invoked 2221 + */ 2222 + extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc) 2223 + { 2224 + flush_workqueue(osdc->notify_wq); 2225 + } 2226 + EXPORT_SYMBOL(ceph_osdc_flush_notifies); 2227 + 2228 + 2229 + /* 2219 2230 * init, shutdown 2220 2231 */ 2221 2232 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
+5 -6
net/core/netpoll.c
··· 550 550 return; 551 551 552 552 proto = ntohs(eth_hdr(skb)->h_proto); 553 - if (proto == ETH_P_IP) { 553 + if (proto == ETH_P_ARP) { 554 554 struct arphdr *arp; 555 555 unsigned char *arp_ptr; 556 556 /* No arp on this interface */ ··· 1284 1284 1285 1285 void netpoll_cleanup(struct netpoll *np) 1286 1286 { 1287 - if (!np->dev) 1288 - return; 1289 - 1290 1287 rtnl_lock(); 1288 + if (!np->dev) 1289 + goto out; 1291 1290 __netpoll_cleanup(np); 1292 - rtnl_unlock(); 1293 - 1294 1291 dev_put(np->dev); 1295 1292 np->dev = NULL; 1293 + out: 1294 + rtnl_unlock(); 1296 1295 } 1297 1296 EXPORT_SYMBOL(netpoll_cleanup); 1298 1297
+1
net/dccp/ipv6.c
··· 135 135 136 136 if (dst) 137 137 dst->ops->redirect(dst, sk, skb); 138 + goto out; 138 139 } 139 140 140 141 if (type == ICMPV6_PKT_TOOBIG) {
+2 -2
net/ipv4/igmp.c
··· 369 369 pip->saddr = fl4.saddr; 370 370 pip->protocol = IPPROTO_IGMP; 371 371 pip->tot_len = 0; /* filled in later */ 372 - ip_select_ident(pip, &rt->dst, NULL); 372 + ip_select_ident(skb, &rt->dst, NULL); 373 373 ((u8 *)&pip[1])[0] = IPOPT_RA; 374 374 ((u8 *)&pip[1])[1] = 4; 375 375 ((u8 *)&pip[1])[2] = 0; ··· 714 714 iph->daddr = dst; 715 715 iph->saddr = fl4.saddr; 716 716 iph->protocol = IPPROTO_IGMP; 717 - ip_select_ident(iph, &rt->dst, NULL); 717 + ip_select_ident(skb, &rt->dst, NULL); 718 718 ((u8 *)&iph[1])[0] = IPOPT_RA; 719 719 ((u8 *)&iph[1])[1] = 4; 720 720 ((u8 *)&iph[1])[2] = 0;
+2 -2
net/ipv4/inetpeer.c
··· 32 32 * At the moment of writing this notes identifier of IP packets is generated 33 33 * to be unpredictable using this code only for packets subjected 34 34 * (actually or potentially) to defragmentation. I.e. DF packets less than 35 - * PMTU in size uses a constant ID and do not use this code (see 36 - * ip_select_ident() in include/net/ip.h). 35 + * PMTU in size when local fragmentation is disabled use a constant ID and do 36 + * not use this code (see ip_select_ident() in include/net/ip.h). 37 37 * 38 38 * Route cache entries hold references to our nodes. 39 39 * New cache entries get references via lookup by destination IP address in
+4 -4
net/ipv4/ip_output.c
··· 148 148 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); 149 149 iph->saddr = saddr; 150 150 iph->protocol = sk->sk_protocol; 151 - ip_select_ident(iph, &rt->dst, sk); 151 + ip_select_ident(skb, &rt->dst, sk); 152 152 153 153 if (opt && opt->opt.optlen) { 154 154 iph->ihl += opt->opt.optlen>>2; ··· 386 386 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); 387 387 } 388 388 389 - ip_select_ident_more(iph, &rt->dst, sk, 389 + ip_select_ident_more(skb, &rt->dst, sk, 390 390 (skb_shinfo(skb)->gso_segs ?: 1) - 1); 391 391 392 392 skb->priority = sk->sk_priority; ··· 1316 1316 else 1317 1317 ttl = ip_select_ttl(inet, &rt->dst); 1318 1318 1319 - iph = (struct iphdr *)skb->data; 1319 + iph = ip_hdr(skb); 1320 1320 iph->version = 4; 1321 1321 iph->ihl = 5; 1322 1322 iph->tos = inet->tos; ··· 1324 1324 iph->ttl = ttl; 1325 1325 iph->protocol = sk->sk_protocol; 1326 1326 ip_copy_addrs(iph, fl4); 1327 - ip_select_ident(iph, &rt->dst, sk); 1327 + ip_select_ident(skb, &rt->dst, sk); 1328 1328 1329 1329 if (opt) { 1330 1330 iph->ihl += opt->optlen>>2;
+1 -1
net/ipv4/ipmr.c
··· 1658 1658 iph->protocol = IPPROTO_IPIP; 1659 1659 iph->ihl = 5; 1660 1660 iph->tot_len = htons(skb->len); 1661 - ip_select_ident(iph, skb_dst(skb), NULL); 1661 + ip_select_ident(skb, skb_dst(skb), NULL); 1662 1662 ip_send_check(iph); 1663 1663 1664 1664 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+1 -1
net/ipv4/raw.c
··· 387 387 iph->check = 0; 388 388 iph->tot_len = htons(length); 389 389 if (!iph->id) 390 - ip_select_ident(iph, &rt->dst, NULL); 390 + ip_select_ident(skb, &rt->dst, NULL); 391 391 392 392 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 393 393 }
+3 -1
net/ipv4/tcp_metrics.c
··· 502 502 * ACKs, wait for troubles. 503 503 */ 504 504 if (crtt > tp->srtt) { 505 - inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk)); 505 + /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */ 506 + crtt >>= 3; 507 + inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk)); 506 508 } else if (tp->srtt == 0) { 507 509 /* RFC6298: 5.7 We've failed to get a valid RTT sample from 508 510 * 3WHS. This is most likely due to retransmission,
+1 -1
net/ipv4/xfrm4_mode_tunnel.c
··· 117 117 118 118 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? 119 119 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); 120 - ip_select_ident(top_iph, dst->child, NULL); 120 + ip_select_ident(skb, dst->child, NULL); 121 121 122 122 top_iph->ttl = ip4_dst_hoplimit(dst->child); 123 123
+2 -2
net/ipv6/ip6_tunnel.c
··· 1656 1656 1657 1657 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || 1658 1658 nla_put(skb, IFLA_IPTUN_LOCAL, sizeof(struct in6_addr), 1659 - &parm->raddr) || 1660 - nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr), 1661 1659 &parm->laddr) || 1660 + nla_put(skb, IFLA_IPTUN_REMOTE, sizeof(struct in6_addr), 1661 + &parm->raddr) || 1662 1662 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) || 1663 1663 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) || 1664 1664 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
+2 -2
net/ipv6/netfilter/nf_nat_proto_icmpv6.c
··· 69 69 hdr = (struct icmp6hdr *)(skb->data + hdroff); 70 70 l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum, 71 71 tuple, maniptype); 72 - if (hdr->icmp6_code == ICMPV6_ECHO_REQUEST || 73 - hdr->icmp6_code == ICMPV6_ECHO_REPLY) { 72 + if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST || 73 + hdr->icmp6_type == ICMPV6_ECHO_REPLY) { 74 74 inet_proto_csum_replace2(&hdr->icmp6_cksum, skb, 75 75 hdr->icmp6_identifier, 76 76 tuple->src.u.icmp.id, 0);
+2 -3
net/netfilter/ipset/ip_set_core.c
··· 1052 1052 * Not an artificial restriction anymore, as we must prevent 1053 1053 * possible loops created by swapping in setlist type of sets. */ 1054 1054 if (!(from->type->features == to->type->features && 1055 - from->type->family == to->type->family)) 1055 + from->family == to->family)) 1056 1056 return -IPSET_ERR_TYPE_MISMATCH; 1057 1057 1058 1058 strncpy(from_name, from->name, IPSET_MAXNAMELEN); ··· 1489 1489 if (ret == -EAGAIN) 1490 1490 ret = 1; 1491 1491 1492 - return (ret < 0 && ret != -ENOTEMPTY) ? ret : 1493 - ret > 0 ? 0 : -IPSET_ERR_EXIST; 1492 + return ret > 0 ? 0 : -IPSET_ERR_EXIST; 1494 1493 } 1495 1494 1496 1495 /* Get headed data of a set */
+2 -2
net/netfilter/ipset/ip_set_getport.c
··· 116 116 { 117 117 int protoff; 118 118 u8 nexthdr; 119 - __be16 frag_off; 119 + __be16 frag_off = 0; 120 120 121 121 nexthdr = ipv6_hdr(skb)->nexthdr; 122 122 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, 123 123 &frag_off); 124 - if (protoff < 0) 124 + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) 125 125 return false; 126 126 127 127 return get_port(skb, nexthdr, protoff, src, port, proto);
+15 -11
net/netfilter/ipset/ip_set_hash_gen.h
··· 325 325 static void 326 326 mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length) 327 327 { 328 - u8 i, j; 328 + u8 i, j, net_end = nets_length - 1; 329 329 330 - for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++) 331 - ; 332 - h->nets[i].nets--; 333 - 334 - if (h->nets[i].nets != 0) 335 - return; 336 - 337 - for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) { 338 - h->nets[j].cidr = h->nets[j + 1].cidr; 339 - h->nets[j].nets = h->nets[j + 1].nets; 330 + for (i = 0; i < nets_length; i++) { 331 + if (h->nets[i].cidr != cidr) 332 + continue; 333 + if (h->nets[i].nets > 1 || i == net_end || 334 + h->nets[i + 1].nets == 0) { 335 + h->nets[i].nets--; 336 + return; 337 + } 338 + for (j = i; j < net_end && h->nets[j].nets; j++) { 339 + h->nets[j].cidr = h->nets[j + 1].cidr; 340 + h->nets[j].nets = h->nets[j + 1].nets; 341 + } 342 + h->nets[j].nets = 0; 343 + return; 340 344 } 341 345 } 342 346 #endif
+2 -2
net/netfilter/ipset/ip_set_hash_ipportnet.c
··· 260 260 e.ip = htonl(ip); 261 261 e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1)); 262 262 ret = adtfn(set, &e, &ext, &ext, flags); 263 - return ip_set_enomatch(ret, flags, adt) ? 1 : 263 + return ip_set_enomatch(ret, flags, adt, set) ? -ret : 264 264 ip_set_eexist(ret, flags) ? 0 : ret; 265 265 } 266 266 ··· 544 544 545 545 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 546 546 ret = adtfn(set, &e, &ext, &ext, flags); 547 - return ip_set_enomatch(ret, flags, adt) ? 1 : 547 + return ip_set_enomatch(ret, flags, adt, set) ? -ret : 548 548 ip_set_eexist(ret, flags) ? 0 : ret; 549 549 } 550 550
+2 -2
net/netfilter/ipset/ip_set_hash_net.c
··· 199 199 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 200 200 e.ip = htonl(ip & ip_set_hostmask(e.cidr)); 201 201 ret = adtfn(set, &e, &ext, &ext, flags); 202 - return ip_set_enomatch(ret, flags, adt) ? 1 : 202 + return ip_set_enomatch(ret, flags, adt, set) ? -ret: 203 203 ip_set_eexist(ret, flags) ? 0 : ret; 204 204 } 205 205 ··· 396 396 397 397 ret = adtfn(set, &e, &ext, &ext, flags); 398 398 399 - return ip_set_enomatch(ret, flags, adt) ? 1 : 399 + return ip_set_enomatch(ret, flags, adt, set) ? -ret : 400 400 ip_set_eexist(ret, flags) ? 0 : ret; 401 401 } 402 402
+2 -2
net/netfilter/ipset/ip_set_hash_netiface.c
··· 368 368 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { 369 369 e.ip = htonl(ip & ip_set_hostmask(e.cidr)); 370 370 ret = adtfn(set, &e, &ext, &ext, flags); 371 - return ip_set_enomatch(ret, flags, adt) ? 1 : 371 + return ip_set_enomatch(ret, flags, adt, set) ? -ret : 372 372 ip_set_eexist(ret, flags) ? 0 : ret; 373 373 } 374 374 ··· 634 634 635 635 ret = adtfn(set, &e, &ext, &ext, flags); 636 636 637 - return ip_set_enomatch(ret, flags, adt) ? 1 : 637 + return ip_set_enomatch(ret, flags, adt, set) ? -ret : 638 638 ip_set_eexist(ret, flags) ? 0 : ret; 639 639 } 640 640
+2 -2
net/netfilter/ipset/ip_set_hash_netport.c
··· 244 244 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { 245 245 e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1)); 246 246 ret = adtfn(set, &e, &ext, &ext, flags); 247 - return ip_set_enomatch(ret, flags, adt) ? 1 : 247 + return ip_set_enomatch(ret, flags, adt, set) ? -ret : 248 248 ip_set_eexist(ret, flags) ? 0 : ret; 249 249 } 250 250 ··· 489 489 490 490 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { 491 491 ret = adtfn(set, &e, &ext, &ext, flags); 492 - return ip_set_enomatch(ret, flags, adt) ? 1 : 492 + return ip_set_enomatch(ret, flags, adt, set) ? -ret : 493 493 ip_set_eexist(ret, flags) ? 0 : ret; 494 494 } 495 495
+1 -1
net/netfilter/ipvs/ip_vs_xmit.c
··· 883 883 iph->daddr = cp->daddr.ip; 884 884 iph->saddr = saddr; 885 885 iph->ttl = old_iph->ttl; 886 - ip_select_ident(iph, &rt->dst, NULL); 886 + ip_select_ident(skb, &rt->dst, NULL); 887 887 888 888 /* Another hack: avoid icmp_send in ip_fragment */ 889 889 skb->local_df = 1;
+1 -2
net/sctp/input.c
··· 634 634 break; 635 635 case ICMP_REDIRECT: 636 636 sctp_icmp_redirect(sk, transport, skb); 637 - err = 0; 638 - break; 637 + /* Fall through to out_unlock. */ 639 638 default: 640 639 goto out_unlock; 641 640 }
+14 -30
net/sctp/ipv6.c
··· 183 183 break; 184 184 case NDISC_REDIRECT: 185 185 sctp_icmp_redirect(sk, transport, skb); 186 - break; 186 + goto out_unlock; 187 187 default: 188 188 break; 189 189 } ··· 204 204 in6_dev_put(idev); 205 205 } 206 206 207 - /* Based on tcp_v6_xmit() in tcp_ipv6.c. */ 208 207 static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) 209 208 { 210 209 struct sock *sk = skb->sk; 211 210 struct ipv6_pinfo *np = inet6_sk(sk); 212 - struct flowi6 fl6; 213 - 214 - memset(&fl6, 0, sizeof(fl6)); 215 - 216 - fl6.flowi6_proto = sk->sk_protocol; 217 - 218 - /* Fill in the dest address from the route entry passed with the skb 219 - * and the source address from the transport. 220 - */ 221 - fl6.daddr = transport->ipaddr.v6.sin6_addr; 222 - fl6.saddr = transport->saddr.v6.sin6_addr; 223 - 224 - fl6.flowlabel = np->flow_label; 225 - IP6_ECN_flow_xmit(sk, fl6.flowlabel); 226 - if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL) 227 - fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id; 228 - else 229 - fl6.flowi6_oif = sk->sk_bound_dev_if; 230 - 231 - if (np->opt && np->opt->srcrt) { 232 - struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; 233 - fl6.daddr = *rt0->addr; 234 - } 211 + struct flowi6 *fl6 = &transport->fl.u.ip6; 235 212 236 213 pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, 237 - skb->len, &fl6.saddr, &fl6.daddr); 214 + skb->len, &fl6->saddr, &fl6->daddr); 238 215 239 - SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 216 + IP6_ECN_flow_xmit(sk, fl6->flowlabel); 240 217 241 218 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 242 219 skb->local_df = 1; 243 220 244 - return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 221 + SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 222 + 223 + return ip6_xmit(sk, skb, fl6, np->opt, np->tclass); 245 224 } 246 225 247 226 /* Returns the dst cache entry for the given source and destination ip ··· 233 254 struct dst_entry *dst = NULL; 234 255 struct flowi6 *fl6 = 
&fl->u.ip6; 235 256 struct sctp_bind_addr *bp; 257 + struct ipv6_pinfo *np = inet6_sk(sk); 236 258 struct sctp_sockaddr_entry *laddr; 237 259 union sctp_addr *baddr = NULL; 238 260 union sctp_addr *daddr = &t->ipaddr; 239 261 union sctp_addr dst_saddr; 262 + struct in6_addr *final_p, final; 240 263 __u8 matchlen = 0; 241 264 __u8 bmatchlen; 242 265 sctp_scope_t scope; ··· 262 281 pr_debug("src=%pI6 - ", &fl6->saddr); 263 282 } 264 283 265 - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); 284 + final_p = fl6_update_dst(fl6, np->opt, &final); 285 + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); 266 286 if (!asoc || saddr) 267 287 goto out; 268 288 ··· 315 333 } 316 334 } 317 335 rcu_read_unlock(); 336 + 318 337 if (baddr) { 319 338 fl6->saddr = baddr->v6.sin6_addr; 320 339 fl6->fl6_sport = baddr->v6.sin6_port; 321 - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); 340 + final_p = fl6_update_dst(fl6, np->opt, &final); 341 + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); 322 342 } 323 343 324 344 out:
+11
net/sunrpc/auth_gss/auth_gss.c
··· 1075 1075 kref_put(&gss_auth->kref, gss_free_callback); 1076 1076 } 1077 1077 1078 + /* 1079 + * Auths may be shared between rpc clients that were cloned from a 1080 + * common client with the same xprt, if they also share the flavor and 1081 + * target_name. 1082 + * 1083 + * The auth is looked up from the oldest parent sharing the same 1084 + * cl_xprt, and the auth itself references only that common parent 1085 + * (which is guaranteed to last as long as any of its descendants). 1086 + */ 1078 1087 static struct gss_auth * 1079 1088 gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args, 1080 1089 struct rpc_clnt *clnt, ··· 1097 1088 gss_auth, 1098 1089 hash, 1099 1090 hashval) { 1091 + if (gss_auth->client != clnt) 1092 + continue; 1100 1093 if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor) 1101 1094 continue; 1102 1095 if (gss_auth->target_name != args->target_name) {
+4 -1
virt/kvm/async_pf.c
··· 101 101 typeof(*work), queue); 102 102 cancel_work_sync(&work->work); 103 103 list_del(&work->queue); 104 - if (!work->done) /* work was canceled */ 104 + if (!work->done) { /* work was canceled */ 105 + mmdrop(work->mm); 106 + kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */ 105 107 kmem_cache_free(async_pf_cache, work); 108 + } 106 109 } 107 110 108 111 spin_lock(&vcpu->async_pf.lock);
+9 -5
virt/kvm/kvm_main.c
··· 1058 1058 EXPORT_SYMBOL_GPL(gfn_to_hva); 1059 1059 1060 1060 /* 1061 - * The hva returned by this function is only allowed to be read. 1062 - * It should pair with kvm_read_hva() or kvm_read_hva_atomic(). 1061 + * If writable is set to false, the hva returned by this function is only 1062 + * allowed to be read. 1063 1063 */ 1064 - static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn) 1064 + unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1065 1065 { 1066 + struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1067 + if (writable) 1068 + *writable = !memslot_is_readonly(slot); 1069 + 1066 1070 return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false); 1067 1071 } 1068 1072 ··· 1434 1430 int r; 1435 1431 unsigned long addr; 1436 1432 1437 - addr = gfn_to_hva_read(kvm, gfn); 1433 + addr = gfn_to_hva_prot(kvm, gfn, NULL); 1438 1434 if (kvm_is_error_hva(addr)) 1439 1435 return -EFAULT; 1440 1436 r = kvm_read_hva(data, (void __user *)addr + offset, len); ··· 1472 1468 gfn_t gfn = gpa >> PAGE_SHIFT; 1473 1469 int offset = offset_in_page(gpa); 1474 1470 1475 - addr = gfn_to_hva_read(kvm, gfn); 1471 + addr = gfn_to_hva_prot(kvm, gfn, NULL); 1476 1472 if (kvm_is_error_hva(addr)) 1477 1473 return -EFAULT; 1478 1474 pagefault_disable();