Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'char-misc-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver updates from Greg KH:
"Here's the big char/misc driver patches for 4.10-rc1. Lots of tiny
changes over lots of "minor" driver subsystems, the largest being some
new FPGA drivers. Other than that, a few other new drivers, but no new
driver subsystems added for this kernel cycle, a nice change.

All of these have been in linux-next with no reported issues"

* tag 'char-misc-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (107 commits)
uio-hv-generic: store physical addresses instead of virtual
Tools: hv: kvp: configurable external scripts path
uio-hv-generic: new userspace i/o driver for VMBus
vmbus: add support for dynamic device id's
hv: change clockevents unbind tactics
hv: acquire vmbus_connection.channel_mutex in vmbus_free_channels()
hyperv: Fix spelling of HV_UNKOWN
mei: bus: enable non-blocking RX
mei: fix the back to back interrupt handling
mei: synchronize irq before initiating a reset.
VME: Remove shutdown entry from vme_driver
auxdisplay: ht16k33: select framebuffer helper modules
MAINTAINERS: add git url for fpga
fpga: Clarify how write_init works streaming modes
fpga zynq: Fix incorrect ISR state on bootup
fpga zynq: Remove priv->dev
fpga zynq: Add missing \n to messages
fpga: Add COMPILE_TEST to all drivers
uio: pruss: add clk_disable()
char/pcmcia: add some error checking in scr24x_read()
...

+5989 -704
+11
Documentation/ABI/testing/sysfs-class-fpga-bridge
··· 1 + What: /sys/class/fpga_bridge/<bridge>/name 2 + Date: January 2016 3 + KernelVersion: 4.5 4 + Contact: Alan Tull <atull@opensource.altera.com> 5 + Description: Name of low level FPGA bridge driver. 6 + 7 + What: /sys/class/fpga_bridge/<bridge>/state 8 + Date: January 2016 9 + KernelVersion: 4.5 10 + Contact: Alan Tull <atull@opensource.altera.com> 11 + Description: Show bridge state as "enabled" or "disabled"
+16
Documentation/ABI/testing/sysfs-class-mei
··· 29 29 Also number of registers varies between 1 and 6 30 30 depending on generation. 31 31 32 + What: /sys/class/mei/meiN/hbm_ver 33 + Date: Aug 2016 34 + KernelVersion: 4.9 35 + Contact: Tomas Winkler <tomas.winkler@intel.com> 36 + Description: Display the negotiated HBM protocol version. 37 + 38 + The HBM protocol version negotiated 39 + between the driver and the device. 40 + 41 + What: /sys/class/mei/meiN/hbm_ver_drv 42 + Date: Aug 2016 43 + KernelVersion: 4.9 44 + Contact: Tomas Winkler <tomas.winkler@intel.com> 45 + Description: Display the driver HBM protocol version. 46 + 47 + The HBM protocol version supported by the driver.
+42
Documentation/devicetree/bindings/display/ht16k33.txt
··· 1 + Holtek ht16k33 RAM mapping 16*8 LED controller driver with keyscan 2 + ------------------------------------------------------------------------------- 3 + 4 + Required properties: 5 + - compatible: "holtek,ht16k33" 6 + - reg: I2C slave address of the chip. 7 + - interrupt-parent: A phandle pointing to the interrupt controller 8 + serving the interrupt for this chip. 9 + - interrupts: Interrupt specification for the key pressed interrupt. 10 + - refresh-rate-hz: Display update interval in HZ. 11 + - debounce-delay-ms: Debouncing interval time in milliseconds. 12 + - linux,keymap: The keymap for keys as described in the binding 13 + document (devicetree/bindings/input/matrix-keymap.txt). 14 + 15 + Optional properties: 16 + - linux,no-autorepeat: Disable keyrepeat. 17 + - default-brightness-level: Initial brightness level [0-15] (default: 15). 18 + 19 + Example: 20 + 21 + &i2c1 { 22 + ht16k33: ht16k33@70 { 23 + compatible = "holtek,ht16k33"; 24 + reg = <0x70>; 25 + refresh-rate-hz = <20>; 26 + debounce-delay-ms = <50>; 27 + interrupt-parent = <&gpio4>; 28 + interrupts = <5 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>; 29 + linux,keymap = < 30 + MATRIX_KEY(2, 0, KEY_F6) 31 + MATRIX_KEY(3, 0, KEY_F8) 32 + MATRIX_KEY(4, 0, KEY_F10) 33 + MATRIX_KEY(5, 0, KEY_F4) 34 + MATRIX_KEY(6, 0, KEY_F2) 35 + MATRIX_KEY(2, 1, KEY_F5) 36 + MATRIX_KEY(3, 1, KEY_F7) 37 + MATRIX_KEY(4, 1, KEY_F9) 38 + MATRIX_KEY(5, 1, KEY_F3) 39 + MATRIX_KEY(6, 1, KEY_F1) 40 + >; 41 + }; 42 + };
+494
Documentation/devicetree/bindings/fpga/fpga-region.txt
··· 1 + FPGA Region Device Tree Binding 2 + 3 + Alan Tull 2016 4 + 5 + CONTENTS 6 + - Introduction 7 + - Terminology 8 + - Sequence 9 + - FPGA Region 10 + - Supported Use Models 11 + - Device Tree Examples 12 + - Constraints 13 + 14 + 15 + Introduction 16 + ============ 17 + 18 + FPGA Regions represent FPGA's and partial reconfiguration regions of FPGA's in 19 + the Device Tree. FPGA Regions provide a way to program FPGAs under device tree 20 + control. 21 + 22 + This device tree binding document hits some of the high points of FPGA usage and 23 + attempts to include terminology used by both major FPGA manufacturers. This 24 + document isn't a replacement for any manufacturers specifications for FPGA 25 + usage. 26 + 27 + 28 + Terminology 29 + =========== 30 + 31 + Full Reconfiguration 32 + * The entire FPGA is programmed. 33 + 34 + Partial Reconfiguration (PR) 35 + * A section of an FPGA is reprogrammed while the rest of the FPGA is not 36 + affected. 37 + * Not all FPGA's support PR. 38 + 39 + Partial Reconfiguration Region (PRR) 40 + * Also called a "reconfigurable partition" 41 + * A PRR is a specific section of a FPGA reserved for reconfiguration. 42 + * A base (or static) FPGA image may create a set of PRR's that later may 43 + be independently reprogrammed many times. 44 + * The size and specific location of each PRR is fixed. 45 + * The connections at the edge of each PRR are fixed. The image that is loaded 46 + into a PRR must fit and must use a subset of the region's connections. 47 + * The busses within the FPGA are split such that each region gets its own 48 + branch that may be gated independently. 49 + 50 + Persona 51 + * Also called a "partial bit stream" 52 + * An FPGA image that is designed to be loaded into a PRR. There may be 53 + any number of personas designed to fit into a PRR, but only one at at time 54 + may be loaded. 55 + * A persona may create more regions. 
56 + 57 + FPGA Bridge 58 + * FPGA Bridges gate bus signals between a host and FPGA. 59 + * FPGA Bridges should be disabled while the FPGA is being programmed to 60 + prevent spurious signals on the cpu bus and to the soft logic. 61 + * FPGA bridges may be actual hardware or soft logic on an FPGA. 62 + * During Full Reconfiguration, hardware bridges between the host and FPGA 63 + will be disabled. 64 + * During Partial Reconfiguration of a specific region, that region's bridge 65 + will be used to gate the busses. Traffic to other regions is not affected. 66 + * In some implementations, the FPGA Manager transparantly handles gating the 67 + buses, eliminating the need to show the hardware FPGA bridges in the 68 + device tree. 69 + * An FPGA image may create a set of reprogrammable regions, each having its 70 + own bridge and its own split of the busses in the FPGA. 71 + 72 + FPGA Manager 73 + * An FPGA Manager is a hardware block that programs an FPGA under the control 74 + of a host processor. 75 + 76 + Base Image 77 + * Also called the "static image" 78 + * An FPGA image that is designed to do full reconfiguration of the FPGA. 79 + * A base image may set up a set of partial reconfiguration regions that may 80 + later be reprogrammed. 81 + 82 + ---------------- ---------------------------------- 83 + | Host CPU | | FPGA | 84 + | | | | 85 + | ----| | ----------- -------- | 86 + | | H | | |==>| Bridge0 |<==>| PRR0 | | 87 + | | W | | | ----------- -------- | 88 + | | | | | | 89 + | | B |<=====>|<==| ----------- -------- | 90 + | | R | | |==>| Bridge1 |<==>| PRR1 | | 91 + | | I | | | ----------- -------- | 92 + | | D | | | | 93 + | | G | | | ----------- -------- | 94 + | | E | | |==>| Bridge2 |<==>| PRR2 | | 95 + | ----| | ----------- -------- | 96 + | | | | 97 + ---------------- ---------------------------------- 98 + 99 + Figure 1: An FPGA set up with a base image that created three regions. 
Each 100 + region (PRR0-2) gets its own split of the busses that is independently gated by 101 + a soft logic bridge (Bridge0-2) in the FPGA. The contents of each PRR can be 102 + reprogrammed independently while the rest of the system continues to function. 103 + 104 + 105 + Sequence 106 + ======== 107 + 108 + When a DT overlay that targets a FPGA Region is applied, the FPGA Region will 109 + do the following: 110 + 111 + 1. Disable appropriate FPGA bridges. 112 + 2. Program the FPGA using the FPGA manager. 113 + 3. Enable the FPGA bridges. 114 + 4. The Device Tree overlay is accepted into the live tree. 115 + 5. Child devices are populated. 116 + 117 + When the overlay is removed, the child nodes will be removed and the FPGA Region 118 + will disable the bridges. 119 + 120 + 121 + FPGA Region 122 + =========== 123 + 124 + FPGA Regions represent FPGA's and FPGA PR regions in the device tree. An FPGA 125 + Region brings together the elements needed to program on a running system and 126 + add the child devices: 127 + 128 + * FPGA Manager 129 + * FPGA Bridges 130 + * image-specific information needed to do the programming. 131 + * child nodes 132 + 133 + The intended use is that a Device Tree overlay (DTO) can be used to reprogram an 134 + FPGA while an operating system is running. 135 + 136 + An FPGA Region that exists in the live Device Tree reflects the current state. 137 + If the live tree shows a "firmware-name" property or child nodes under a FPGA 138 + Region, the FPGA already has been programmed. A DTO that targets a FPGA Region 139 + and adds the "firmware-name" property is taken as a request to reprogram the 140 + FPGA. After reprogramming is successful, the overlay is accepted into the live 141 + tree. 142 + 143 + The base FPGA Region in the device tree represents the FPGA and supports full 144 + reconfiguration. It must include a phandle to an FPGA Manager. 
The base 145 + FPGA region will be the child of one of the hardware bridges (the bridge that 146 + allows register access) between the cpu and the FPGA. If there are more than 147 + one bridge to control during FPGA programming, the region will also contain a 148 + list of phandles to the additional hardware FPGA Bridges. 149 + 150 + For partial reconfiguration (PR), each PR region will have an FPGA Region. 151 + These FPGA regions are children of FPGA bridges which are then children of the 152 + base FPGA region. The "Full Reconfiguration to add PRR's" example below shows 153 + this. 154 + 155 + If an FPGA Region does not specify a FPGA Manager, it will inherit the FPGA 156 + Manager specified by its ancestor FPGA Region. This supports both the case 157 + where the same FPGA Manager is used for all of a FPGA as well as the case where 158 + a different FPGA Manager is used for each region. 159 + 160 + FPGA Regions do not inherit their ancestor FPGA regions' bridges. This prevents 161 + shutting down bridges that are upstream from the other active regions while one 162 + region is getting reconfigured (see Figure 1 above). During PR, the FPGA's 163 + hardware bridges remain enabled. The PR regions' bridges will be FPGA bridges 164 + within the static image of the FPGA. 165 + 166 + Required properties: 167 + - compatible : should contain "fpga-region" 168 + - fpga-mgr : should contain a phandle to an FPGA Manager. Child FPGA Regions 169 + inherit this property from their ancestor regions. A fpga-mgr property 170 + in a region will override any inherited FPGA manager. 171 + - #address-cells, #size-cells, ranges : must be present to handle address space 172 + mapping for child nodes. 173 + 174 + Optional properties: 175 + - firmware-name : should contain the name of an FPGA image file located on the 176 + firmware search path. If this property shows up in a live device tree 177 + it indicates that the FPGA has already been programmed with this image. 
178 + If this property is in an overlay targeting a FPGA region, it is a 179 + request to program the FPGA with that image. 180 + - fpga-bridges : should contain a list of phandles to FPGA Bridges that must be 181 + controlled during FPGA programming along with the parent FPGA bridge. 182 + This property is optional if the FPGA Manager handles the bridges. 183 + If the fpga-region is the child of a fpga-bridge, the list should not 184 + contain the parent bridge. 185 + - partial-fpga-config : boolean, set if partial reconfiguration is to be done, 186 + otherwise full reconfiguration is done. 187 + - external-fpga-config : boolean, set if the FPGA has already been configured 188 + prior to OS boot up. 189 + - region-unfreeze-timeout-us : The maximum time in microseconds to wait for 190 + bridges to successfully become enabled after the region has been 191 + programmed. 192 + - region-freeze-timeout-us : The maximum time in microseconds to wait for 193 + bridges to successfully become disabled before the region has been 194 + programmed. 195 + - child nodes : devices in the FPGA after programming. 196 + 197 + In the example below, when an overlay is applied targeting fpga-region0, 198 + fpga_mgr is used to program the FPGA. Two bridges are controlled during 199 + programming: the parent fpga_bridge0 and fpga_bridge1. Because the region is 200 + the child of fpga_bridge0, only fpga_bridge1 needs to be specified in the 201 + fpga-bridges property. During programming, these bridges are disabled, the 202 + firmware specified in the overlay is loaded to the FPGA using the FPGA manager 203 + specified in the region. If FPGA programming succeeds, the bridges are 204 + reenabled and the overlay makes it into the live device tree. The child devices 205 + are then populated. If FPGA programming fails, the bridges are left disabled 206 + and the overlay is rejected. 
The overlay's ranges property maps the lwhps 207 + bridge's region (0xff200000) and the hps bridge's region (0xc0000000) for use by 208 + the two child devices. 209 + 210 + Example: 211 + Base tree contains: 212 + 213 + fpga_mgr: fpga-mgr@ff706000 { 214 + compatible = "altr,socfpga-fpga-mgr"; 215 + reg = <0xff706000 0x1000 216 + 0xffb90000 0x20>; 217 + interrupts = <0 175 4>; 218 + }; 219 + 220 + fpga_bridge0: fpga-bridge@ff400000 { 221 + compatible = "altr,socfpga-lwhps2fpga-bridge"; 222 + reg = <0xff400000 0x100000>; 223 + resets = <&rst LWHPS2FPGA_RESET>; 224 + clocks = <&l4_main_clk>; 225 + 226 + #address-cells = <1>; 227 + #size-cells = <1>; 228 + ranges; 229 + 230 + fpga_region0: fpga-region0 { 231 + compatible = "fpga-region"; 232 + fpga-mgr = <&fpga_mgr>; 233 + }; 234 + }; 235 + 236 + fpga_bridge1: fpga-bridge@ff500000 { 237 + compatible = "altr,socfpga-hps2fpga-bridge"; 238 + reg = <0xff500000 0x10000>; 239 + resets = <&rst HPS2FPGA_RESET>; 240 + clocks = <&l4_main_clk>; 241 + }; 242 + 243 + Overlay contains: 244 + 245 + /dts-v1/ /plugin/; 246 + / { 247 + fragment@0 { 248 + target = <&fpga_region0>; 249 + #address-cells = <1>; 250 + #size-cells = <1>; 251 + __overlay__ { 252 + #address-cells = <1>; 253 + #size-cells = <1>; 254 + 255 + firmware-name = "soc_system.rbf"; 256 + fpga-bridges = <&fpga_bridge1>; 257 + ranges = <0x20000 0xff200000 0x100000>, 258 + <0x0 0xc0000000 0x20000000>; 259 + 260 + gpio@10040 { 261 + compatible = "altr,pio-1.0"; 262 + reg = <0x10040 0x20>; 263 + altr,gpio-bank-width = <4>; 264 + #gpio-cells = <2>; 265 + clocks = <2>; 266 + gpio-controller; 267 + }; 268 + 269 + onchip-memory { 270 + device_type = "memory"; 271 + compatible = "altr,onchipmem-15.1"; 272 + reg = <0x0 0x10000>; 273 + }; 274 + }; 275 + }; 276 + }; 277 + 278 + 279 + Supported Use Models 280 + ==================== 281 + 282 + In all cases the live DT must have the FPGA Manager, FPGA Bridges (if any), and 283 + a FPGA Region. 
The target of the Device Tree Overlay is the FPGA Region. Some 284 + uses are specific to a FPGA device. 285 + 286 + * No FPGA Bridges 287 + In this case, the FPGA Manager which programs the FPGA also handles the 288 + bridges behind the scenes. No FPGA Bridge devices are needed for full 289 + reconfiguration. 290 + 291 + * Full reconfiguration with hardware bridges 292 + In this case, there are hardware bridges between the processor and FPGA that 293 + need to be controlled during full reconfiguration. Before the overlay is 294 + applied, the live DT must include the FPGA Manager, FPGA Bridges, and a 295 + FPGA Region. The FPGA Region is the child of the bridge that allows 296 + register access to the FPGA. Additional bridges may be listed in a 297 + fpga-bridges property in the FPGA region or in the device tree overlay. 298 + 299 + * Partial reconfiguration with bridges in the FPGA 300 + In this case, the FPGA will have one or more PRR's that may be programmed 301 + separately while the rest of the FPGA can remain active. To manage this, 302 + bridges need to exist in the FPGA that can gate the buses going to each FPGA 303 + region while the buses are enabled for other sections. Before any partial 304 + reconfiguration can be done, a base FPGA image must be loaded which includes 305 + PRR's with FPGA bridges. The device tree should have a FPGA region for each 306 + PRR. 307 + 308 + Device Tree Examples 309 + ==================== 310 + 311 + The intention of this section is to give some simple examples, focusing on 312 + the placement of the elements detailed above, especially: 313 + * FPGA Manager 314 + * FPGA Bridges 315 + * FPGA Region 316 + * ranges 317 + * target-path or target 318 + 319 + For the purposes of this section, I'm dividing the Device Tree into two parts, 320 + each with its own requirements. 
The two parts are: 321 + * The live DT prior to the overlay being added 322 + * The DT overlay 323 + 324 + The live Device Tree must contain an FPGA Region, an FPGA Manager, and any FPGA 325 + Bridges. The FPGA Region's "fpga-mgr" property specifies the manager by phandle 326 + to handle programming the FPGA. If the FPGA Region is the child of another FPGA 327 + Region, the parent's FPGA Manager is used. If FPGA Bridges need to be involved, 328 + they are specified in the FPGA Region by the "fpga-bridges" property. During 329 + FPGA programming, the FPGA Region will disable the bridges that are in its 330 + "fpga-bridges" list and will re-enable them after FPGA programming has 331 + succeeded. 332 + 333 + The Device Tree Overlay will contain: 334 + * "target-path" or "target" 335 + The insertion point where the the contents of the overlay will go into the 336 + live tree. target-path is a full path, while target is a phandle. 337 + * "ranges" 338 + The address space mapping from processor to FPGA bus(ses). 339 + * "firmware-name" 340 + Specifies the name of the FPGA image file on the firmware search 341 + path. The search path is described in the firmware class documentation. 342 + * "partial-fpga-config" 343 + This binding is a boolean and should be present if partial reconfiguration 344 + is to be done. 345 + * child nodes corresponding to hardware that will be loaded in this region of 346 + the FPGA. 
347 + 348 + Device Tree Example: Full Reconfiguration without Bridges 349 + ========================================================= 350 + 351 + Live Device Tree contains: 352 + fpga_mgr0: fpga-mgr@f8007000 { 353 + compatible = "xlnx,zynq-devcfg-1.0"; 354 + reg = <0xf8007000 0x100>; 355 + interrupt-parent = <&intc>; 356 + interrupts = <0 8 4>; 357 + clocks = <&clkc 12>; 358 + clock-names = "ref_clk"; 359 + syscon = <&slcr>; 360 + }; 361 + 362 + fpga_region0: fpga-region0 { 363 + compatible = "fpga-region"; 364 + fpga-mgr = <&fpga_mgr0>; 365 + #address-cells = <0x1>; 366 + #size-cells = <0x1>; 367 + ranges; 368 + }; 369 + 370 + DT Overlay contains: 371 + /dts-v1/ /plugin/; 372 + / { 373 + fragment@0 { 374 + target = <&fpga_region0>; 375 + #address-cells = <1>; 376 + #size-cells = <1>; 377 + __overlay__ { 378 + #address-cells = <1>; 379 + #size-cells = <1>; 380 + 381 + firmware-name = "zynq-gpio.bin"; 382 + 383 + gpio1: gpio@40000000 { 384 + compatible = "xlnx,xps-gpio-1.00.a"; 385 + reg = <0x40000000 0x10000>; 386 + gpio-controller; 387 + #gpio-cells = <0x2>; 388 + xlnx,gpio-width= <0x6>; 389 + }; 390 + }; 391 + }; 392 + 393 + Device Tree Example: Full Reconfiguration to add PRR's 394 + ====================================================== 395 + 396 + The base FPGA Region is specified similar to the first example above. 397 + 398 + This example programs the FPGA to have two regions that can later be partially 399 + configured. Each region has its own bridge in the FPGA fabric. 
400 + 401 + DT Overlay contains: 402 + /dts-v1/ /plugin/; 403 + / { 404 + fragment@0 { 405 + target = <&fpga_region0>; 406 + #address-cells = <1>; 407 + #size-cells = <1>; 408 + __overlay__ { 409 + #address-cells = <1>; 410 + #size-cells = <1>; 411 + 412 + firmware-name = "base.rbf"; 413 + 414 + fpga-bridge@4400 { 415 + compatible = "altr,freeze-bridge"; 416 + reg = <0x4400 0x10>; 417 + 418 + fpga_region1: fpga-region1 { 419 + compatible = "fpga-region"; 420 + #address-cells = <0x1>; 421 + #size-cells = <0x1>; 422 + ranges; 423 + }; 424 + }; 425 + 426 + fpga-bridge@4420 { 427 + compatible = "altr,freeze-bridge"; 428 + reg = <0x4420 0x10>; 429 + 430 + fpga_region2: fpga-region2 { 431 + compatible = "fpga-region"; 432 + #address-cells = <0x1>; 433 + #size-cells = <0x1>; 434 + ranges; 435 + }; 436 + }; 437 + }; 438 + }; 439 + }; 440 + 441 + Device Tree Example: Partial Reconfiguration 442 + ============================================ 443 + 444 + This example reprograms one of the PRR's set up in the previous example. 445 + 446 + The sequence that occurs when this overlay is similar to the above, the only 447 + differences are that the FPGA is partially reconfigured due to the 448 + "partial-fpga-config" boolean and the only bridge that is controlled during 449 + programming is the FPGA based bridge of fpga_region1. 
450 + 451 + /dts-v1/ /plugin/; 452 + / { 453 + fragment@0 { 454 + target = <&fpga_region1>; 455 + #address-cells = <1>; 456 + #size-cells = <1>; 457 + __overlay__ { 458 + #address-cells = <1>; 459 + #size-cells = <1>; 460 + 461 + firmware-name = "soc_image2.rbf"; 462 + partial-fpga-config; 463 + 464 + gpio@10040 { 465 + compatible = "altr,pio-1.0"; 466 + reg = <0x10040 0x20>; 467 + clocks = <0x2>; 468 + altr,gpio-bank-width = <0x4>; 469 + resetvalue = <0x0>; 470 + #gpio-cells = <0x2>; 471 + gpio-controller; 472 + }; 473 + }; 474 + }; 475 + }; 476 + 477 + Constraints 478 + =========== 479 + 480 + It is beyond the scope of this document to fully describe all the FPGA design 481 + constraints required to make partial reconfiguration work[1] [2] [3], but a few 482 + deserve quick mention. 483 + 484 + A persona must have boundary connections that line up with those of the partition 485 + or region it is designed to go into. 486 + 487 + During programming, transactions through those connections must be stopped and 488 + the connections must be held at a fixed logic level. This can be achieved by 489 + FPGA Bridges that exist on the FPGA fabric prior to the partial reconfiguration. 490 + 491 + -- 492 + [1] www.altera.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug_partrecon.pdf 493 + [2] tspace.library.utoronto.ca/bitstream/1807/67932/1/Byma_Stuart_A_201411_MAS_thesis.pdf 494 + [3] http://www.xilinx.com/support/documentation/sw_manuals/xilinx14_1/ug702.pdf
+17
Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
··· 1 + Broadcom OTP memory controller 2 + 3 + Required Properties: 4 + - compatible: "brcm,ocotp" for the first generation Broadcom OTPC which is used 5 + in Cygnus and supports 32 bit read/write. Use "brcm,ocotp-v2" for the second 6 + generation Broadcom OTPC which is used in SoC's such as Stingray and supports 7 + 64-bit read/write. 8 + - reg: Base address of the OTP controller. 9 + - brcm,ocotp-size: Amount of memory available, in 32 bit words 10 + 11 + Example: 12 + 13 + otp: otp@0301c800 { 14 + compatible = "brcm,ocotp"; 15 + reg = <0x0301c800 0x2c>; 16 + brcm,ocotp-size = <2048>; 17 + };
+20
Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt
··· 1 + * NXP LPC18xx OTP memory 2 + 3 + Internal OTP (One Time Programmable) memory for NXP LPC18xx/43xx devices. 4 + 5 + Required properties: 6 + - compatible: Should be "nxp,lpc1850-otp" 7 + - reg: Must contain an entry with the physical base address and length 8 + for each entry in reg-names. 9 + - address-cells: must be set to 1. 10 + - size-cells: must be set to 1. 11 + 12 + See nvmem.txt for more information. 13 + 14 + Example: 15 + otp: otp@40045000 { 16 + compatible = "nxp,lpc1850-otp"; 17 + reg = <0x40045000 0x1000>; 18 + #address-cells = <1>; 19 + #size-cells = <1>; 20 + };
+1
Documentation/devicetree/bindings/vendor-prefixes.txt
··· 127 127 holt Holt Integrated Circuits, Inc. 128 128 honeywell Honeywell 129 129 hp Hewlett Packard 130 + holtek Holtek Semiconductor, Inc. 130 131 i2se I2SE GmbH 131 132 ibm International Business Machines (IBM) 132 133 idt Integrated Device Technologies, Inc.
+29 -14
Documentation/fpga/fpga-mgr.txt
··· 18 18 To program the FPGA from a file or from a buffer: 19 19 ------------------------------------------------- 20 20 21 - int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, 21 + int fpga_mgr_buf_load(struct fpga_manager *mgr, 22 + struct fpga_image_info *info, 22 23 const char *buf, size_t count); 23 24 24 25 Load the FPGA from an image which exists as a buffer in memory. 25 26 26 - int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, 27 + int fpga_mgr_firmware_load(struct fpga_manager *mgr, 28 + struct fpga_image_info *info, 27 29 const char *image_name); 28 30 29 31 Load the FPGA from an image which exists as a file. The image file must be on 30 - the firmware search path (see the firmware class documentation). 32 + the firmware search path (see the firmware class documentation). If successful, 33 + the FPGA ends up in operating mode. Return 0 on success or a negative error 34 + code. 31 35 32 - For both these functions, flags == 0 for normal full reconfiguration or 33 - FPGA_MGR_PARTIAL_RECONFIG for partial reconfiguration. If successful, the FPGA 34 - ends up in operating mode. Return 0 on success or a negative error code. 35 - 36 + A FPGA design contained in a FPGA image file will likely have particulars that 37 + affect how the image is programmed to the FPGA. These are contained in struct 38 + fpga_image_info. Currently the only such particular is a single flag bit 39 + indicating whether the image is for full or partial reconfiguration. 36 40 37 41 To get/put a reference to a FPGA manager: 38 42 ----------------------------------------- 39 43 40 44 struct fpga_manager *of_fpga_mgr_get(struct device_node *node); 45 + struct fpga_manager *fpga_mgr_get(struct device *dev); 46 + 47 + Given a DT node or device, get an exclusive reference to a FPGA manager. 41 48 42 49 void fpga_mgr_put(struct fpga_manager *mgr); 43 50 44 - Given a DT node, get an exclusive reference to a FPGA manager or release 45 - the reference. 
51 + Release the reference. 46 52 47 53 48 54 To register or unregister the low level FPGA-specific driver: ··· 76 70 char *buf = ... 77 71 int count = ... 78 72 73 + /* struct with information about the FPGA image to program. */ 74 + struct fpga_image_info info; 75 + 79 76 /* flags indicates whether to do full or partial reconfiguration */ 80 - int flags = 0; 77 + info.flags = 0; 81 78 82 79 int ret; 83 80 ··· 88 79 struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node); 89 80 90 81 /* Load the buffer to the FPGA */ 91 - ret = fpga_mgr_buf_load(mgr, flags, buf, count); 82 + ret = fpga_mgr_buf_load(mgr, &info, buf, count); 92 83 93 84 /* Release the FPGA manager */ 94 85 fpga_mgr_put(mgr); ··· 105 96 /* FPGA image is in this file which is in the firmware search path */ 106 97 const char *path = "fpga-image-9.rbf" 107 98 99 + /* struct with information about the FPGA image to program. */ 100 + struct fpga_image_info info; 101 + 108 102 /* flags indicates whether to do full or partial reconfiguration */ 109 - int flags = 0; 103 + info.flags = 0; 110 104 111 105 int ret; 112 106 ··· 117 105 struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node); 118 106 119 107 /* Get the firmware image (path) and load it to the FPGA */ 120 - ret = fpga_mgr_firmware_load(mgr, flags, path); 108 + ret = fpga_mgr_firmware_load(mgr, &info, path); 121 109 122 110 /* Release the FPGA manager */ 123 111 fpga_mgr_put(mgr); ··· 169 157 2. .write (may be called once or multiple times) 170 158 3. .write_complete 171 159 172 - The .write_init function will prepare the FPGA to receive the image data. 160 + The .write_init function will prepare the FPGA to receive the image data. The 161 + buffer passed into .write_init will be atmost .initial_header_size bytes long, 162 + if the whole bitstream is not immediately available then the core code will 163 + buffer up at least this much before starting. 173 164 174 165 The .write function writes a buffer to the FPGA. 
The buffer may contain the 175 166 whole FPGA image or may be a smaller chunk of an FPGA image. In the latter
+22
Documentation/trace/intel_th.txt
··· 97 97 # and now you can collect the trace from the device node: 98 98 99 99 $ cat /dev/intel_th0/msc0 > my_stp_trace 100 + 101 + Host Debugger Mode 102 + ================== 103 + 104 + It is possible to configure the Trace Hub and control its trace 105 + capture from a remote debug host, which should be connected via one of 106 + the hardware debugging interfaces, which will then be used to both 107 + control Intel Trace Hub and transfer its trace data to the debug host. 108 + 109 + The driver needs to be told that such an arrangement is taking place 110 + so that it does not touch any capture/port configuration and avoids 111 + conflicting with the debug host's configuration accesses. The only 112 + activity that the driver will perform in this mode is collecting 113 + software traces to the Software Trace Hub (an stm class device). The 114 + user is still responsible for setting up adequate master/channel 115 + mappings that the decoder on the receiving end would recognize. 116 + 117 + In order to enable the host mode, set the 'host_mode' parameter of the 118 + 'intel_th' kernel module to 'y'. None of the virtual output devices 119 + will show up on the intel_th bus. Also, trace configuration and 120 + capture controlling attribute groups of the 'gth' device will not be 121 + exposed. The 'sth' device will operate as usual.
+35 -4
Documentation/trace/stm.txt
··· 69 69 width==64, you should be able to mmap() one page on this file 70 70 descriptor and obtain direct access to an mmio region for 64 channels. 71 71 72 - For kernel-based trace sources, there is "stm_source" device 73 - class. Devices of this class can be connected and disconnected to/from 74 - stm devices at runtime via a sysfs attribute. 75 - 76 72 Examples of STM devices are Intel(R) Trace Hub [1] and Coresight STM 77 73 [2]. 74 + 75 + stm_source 76 + ========== 77 + 78 + For kernel-based trace sources, there is "stm_source" device 79 + class. Devices of this class can be connected and disconnected to/from 80 + stm devices at runtime via a sysfs attribute called "stm_source_link" 81 + by writing the name of the desired stm device there, for example: 82 + 83 + $ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link 84 + 85 + For examples on how to use stm_source interface in the kernel, refer 86 + to stm_console or stm_heartbeat drivers. 87 + 88 + Each stm_source device will need to assume a master and a range of 89 + channels, depending on how many channels it requires. These are 90 + allocated for the device according to the policy configuration. If 91 + there's a node in the root of the policy directory that matches the 92 + stm_source device's name (for example, "console"), this node will be 93 + used to allocate master and channel numbers. If there's no such policy 94 + node, the stm core will pick the first contiguous chunk of channels 95 + within the first available master. Note that the node must exist 96 + before the stm_source device is connected to its stm device. 97 + 98 + stm_console 99 + =========== 100 + 101 + One implementation of this interface also used in the example above is 102 + the "stm_console" driver, which basically provides a one-way console 103 + for kernel messages over an stm device. 
104 + 105 + To configure the master/channel pair that will be assigned to this 106 + console in the STP stream, create a "console" policy entry (see the 107 + beginning of this text on how to do that). When initialized, it will 108 + consume one channel. 78 109 79 110 [1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf 80 111 [2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html
+17 -3
MAINTAINERS
··· 3067 3067 F: drivers/usb/wusbcore/ 3068 3068 F: include/linux/usb/wusb* 3069 3069 3070 + HT16K33 LED CONTROLLER DRIVER 3071 + M: Robin van der Gracht <robin@protonic.nl> 3072 + S: Maintained 3073 + F: drivers/auxdisplay/ht16k33.c 3074 + F: Documentation/devicetree/bindings/display/ht16k33.txt 3075 + 3070 3076 CFAG12864B LCD DRIVER 3071 3077 M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com> 3072 3078 W: http://miguelojeda.es/auxdisplay.htm ··· 5049 5043 FPGA MANAGER FRAMEWORK 5050 5044 M: Alan Tull <atull@opensource.altera.com> 5051 5045 R: Moritz Fischer <moritz.fischer@ettus.com> 5046 + L: linux-fpga@vger.kernel.org 5052 5047 S: Maintained 5048 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git 5053 5049 F: drivers/fpga/ 5054 5050 F: include/linux/fpga/fpga-mgr.h 5055 5051 W: http://www.rocketboards.org ··· 5948 5940 F: drivers/pci/host/pci-hyperv.c 5949 5941 F: drivers/net/hyperv/ 5950 5942 F: drivers/scsi/storvsc_drv.c 5943 + F: drivers/uio/uio_hv_generic.c 5951 5944 F: drivers/video/fbdev/hyperv_fb.c 5952 5945 F: include/linux/hyperv.h 5953 5946 F: tools/hv/ ··· 9240 9231 9241 9232 PARALLEL PORT SUBSYSTEM 9242 9233 M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> 9243 - M: Sudip Mukherjee <sudip@vectorindia.org> 9234 + M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk> 9244 9235 L: linux-parport@lists.infradead.org (subscribers-only) 9245 9236 S: Maintained 9246 9237 F: drivers/parport/ ··· 10850 10841 S: Supported 10851 10842 F: arch/score/ 10852 10843 10844 + SCR24X CHIP CARD INTERFACE DRIVER 10845 + M: Lubomir Rintel <lkundrak@v3.sk> 10846 + S: Supported 10847 + F: drivers/char/pcmcia/scr24x_cs.c 10848 + 10853 10849 SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers 10854 10850 M: Sudeep Holla <sudeep.holla@arm.com> 10855 10851 L: linux-arm-kernel@lists.infradead.org ··· 11258 11244 SILICON MOTION SM712 FRAME BUFFER DRIVER 11259 11245 M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> 11260 11246 M: Teddy 
Wang <teddy.wang@siliconmotion.com> 11261 - M: Sudip Mukherjee <sudip@vectorindia.org> 11247 + M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk> 11262 11248 L: linux-fbdev@vger.kernel.org 11263 11249 S: Maintained 11264 11250 F: drivers/video/fbdev/sm712* ··· 11686 11672 STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER 11687 11673 M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> 11688 11674 M: Teddy Wang <teddy.wang@siliconmotion.com> 11689 - M: Sudip Mukherjee <sudip@vectorindia.org> 11675 + M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk> 11690 11676 L: linux-fbdev@vger.kernel.org 11691 11677 S: Maintained 11692 11678 F: drivers/staging/sm750fb/
+4 -6
arch/blackfin/mach-bf561/coreb.c
··· 1 1 /* Load firmware into Core B on a BF561 2 2 * 3 + * Author: Bas Vermeulen <bvermeul@blackstar.xs4all.nl> 4 + * 3 5 * Copyright 2004-2009 Analog Devices Inc. 4 6 * Licensed under the GPL-2 or later. 5 7 */ ··· 16 14 17 15 #include <linux/device.h> 18 16 #include <linux/fs.h> 17 + #include <linux/init.h> 19 18 #include <linux/kernel.h> 20 19 #include <linux/miscdevice.h> 21 - #include <linux/module.h> 22 20 23 21 #define CMD_COREB_START _IO('b', 0) 24 22 #define CMD_COREB_STOP _IO('b', 1) ··· 61 59 .name = "coreb", 62 60 .fops = &coreb_fops, 63 61 }; 64 - module_misc_device(coreb_dev); 65 - 66 - MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>"); 67 - MODULE_DESCRIPTION("BF561 Core B Support"); 68 - MODULE_LICENSE("GPL"); 62 + builtin_misc_device(coreb_dev);
+13
drivers/auxdisplay/Kconfig
··· 128 128 development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3 129 129 from Imagination Technologies. 130 130 131 + config HT16K33 132 + tristate "Holtek Ht16K33 LED controller with keyscan" 133 + depends on FB && OF && I2C && INPUT 134 + select FB_SYS_FOPS 135 + select FB_CFB_FILLRECT 136 + select FB_CFB_COPYAREA 137 + select FB_CFB_IMAGEBLIT 138 + select INPUT_MATRIXKMAP 139 + select FB_BACKLIGHT 140 + help 141 + Say yes here to add support for Holtek HT16K33, RAM mapping 16*8 142 + LED controller driver with keyscan. 143 + 131 144 endif # AUXDISPLAY
+1
drivers/auxdisplay/Makefile
··· 5 5 obj-$(CONFIG_KS0108) += ks0108.o 6 6 obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o 7 7 obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o 8 + obj-$(CONFIG_HT16K33) += ht16k33.o
+563
drivers/auxdisplay/ht16k33.c
··· 1 + /* 2 + * HT16K33 driver 3 + * 4 + * Author: Robin van der Gracht <robin@protonic.nl> 5 + * 6 + * Copyright: (C) 2016 Protonic Holland. 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope that it will be useful, but 13 + * WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 + * General Public License for more details. 16 + */ 17 + 18 + #include <linux/kernel.h> 19 + #include <linux/module.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/i2c.h> 22 + #include <linux/of.h> 23 + #include <linux/fb.h> 24 + #include <linux/slab.h> 25 + #include <linux/backlight.h> 26 + #include <linux/input.h> 27 + #include <linux/input/matrix_keypad.h> 28 + #include <linux/workqueue.h> 29 + #include <linux/mm.h> 30 + 31 + /* Registers */ 32 + #define REG_SYSTEM_SETUP 0x20 33 + #define REG_SYSTEM_SETUP_OSC_ON BIT(0) 34 + 35 + #define REG_DISPLAY_SETUP 0x80 36 + #define REG_DISPLAY_SETUP_ON BIT(0) 37 + 38 + #define REG_ROWINT_SET 0xA0 39 + #define REG_ROWINT_SET_INT_EN BIT(0) 40 + #define REG_ROWINT_SET_INT_ACT_HIGH BIT(1) 41 + 42 + #define REG_BRIGHTNESS 0xE0 43 + 44 + /* Defines */ 45 + #define DRIVER_NAME "ht16k33" 46 + 47 + #define MIN_BRIGHTNESS 0x1 48 + #define MAX_BRIGHTNESS 0x10 49 + 50 + #define HT16K33_MATRIX_LED_MAX_COLS 8 51 + #define HT16K33_MATRIX_LED_MAX_ROWS 16 52 + #define HT16K33_MATRIX_KEYPAD_MAX_COLS 3 53 + #define HT16K33_MATRIX_KEYPAD_MAX_ROWS 12 54 + 55 + #define BYTES_PER_ROW (HT16K33_MATRIX_LED_MAX_ROWS / 8) 56 + #define HT16K33_FB_SIZE (HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW) 57 + 58 + struct ht16k33_keypad { 59 + struct input_dev *dev; 60 + spinlock_t lock; 61 + struct delayed_work work; 62 + uint32_t cols; 63 + uint32_t rows; 64 + uint32_t row_shift; 65 + 
uint32_t debounce_ms; 66 + uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS]; 67 + }; 68 + 69 + struct ht16k33_fbdev { 70 + struct fb_info *info; 71 + uint32_t refresh_rate; 72 + uint8_t *buffer; 73 + uint8_t *cache; 74 + struct delayed_work work; 75 + }; 76 + 77 + struct ht16k33_priv { 78 + struct i2c_client *client; 79 + struct ht16k33_keypad keypad; 80 + struct ht16k33_fbdev fbdev; 81 + struct workqueue_struct *workqueue; 82 + }; 83 + 84 + static struct fb_fix_screeninfo ht16k33_fb_fix = { 85 + .id = DRIVER_NAME, 86 + .type = FB_TYPE_PACKED_PIXELS, 87 + .visual = FB_VISUAL_MONO10, 88 + .xpanstep = 0, 89 + .ypanstep = 0, 90 + .ywrapstep = 0, 91 + .line_length = HT16K33_MATRIX_LED_MAX_ROWS, 92 + .accel = FB_ACCEL_NONE, 93 + }; 94 + 95 + static struct fb_var_screeninfo ht16k33_fb_var = { 96 + .xres = HT16K33_MATRIX_LED_MAX_ROWS, 97 + .yres = HT16K33_MATRIX_LED_MAX_COLS, 98 + .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS, 99 + .yres_virtual = HT16K33_MATRIX_LED_MAX_COLS, 100 + .bits_per_pixel = 1, 101 + .red = { 0, 1, 0 }, 102 + .green = { 0, 1, 0 }, 103 + .blue = { 0, 1, 0 }, 104 + .left_margin = 0, 105 + .right_margin = 0, 106 + .upper_margin = 0, 107 + .lower_margin = 0, 108 + .vmode = FB_VMODE_NONINTERLACED, 109 + }; 110 + 111 + static int ht16k33_display_on(struct ht16k33_priv *priv) 112 + { 113 + uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON; 114 + 115 + return i2c_smbus_write_byte(priv->client, data); 116 + } 117 + 118 + static int ht16k33_display_off(struct ht16k33_priv *priv) 119 + { 120 + return i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP); 121 + } 122 + 123 + static void ht16k33_fb_queue(struct ht16k33_priv *priv) 124 + { 125 + struct ht16k33_fbdev *fbdev = &priv->fbdev; 126 + 127 + queue_delayed_work(priv->workqueue, &fbdev->work, 128 + msecs_to_jiffies(HZ / fbdev->refresh_rate)); 129 + } 130 + 131 + static void ht16k33_keypad_queue(struct ht16k33_priv *priv) 132 + { 133 + struct ht16k33_keypad *keypad = &priv->keypad; 134 + 135 + 
queue_delayed_work(priv->workqueue, &keypad->work, 136 + msecs_to_jiffies(keypad->debounce_ms)); 137 + } 138 + 139 + /* 140 + * This gets the fb data from cache and copies it to ht16k33 display RAM 141 + */ 142 + static void ht16k33_fb_update(struct work_struct *work) 143 + { 144 + struct ht16k33_fbdev *fbdev = 145 + container_of(work, struct ht16k33_fbdev, work.work); 146 + struct ht16k33_priv *priv = 147 + container_of(fbdev, struct ht16k33_priv, fbdev); 148 + 149 + uint8_t *p1, *p2; 150 + int len, pos = 0, first = -1; 151 + 152 + p1 = fbdev->cache; 153 + p2 = fbdev->buffer; 154 + 155 + /* Search for the first byte with changes */ 156 + while (pos < HT16K33_FB_SIZE && first < 0) { 157 + if (*(p1++) - *(p2++)) 158 + first = pos; 159 + pos++; 160 + } 161 + 162 + /* No changes found */ 163 + if (first < 0) 164 + goto requeue; 165 + 166 + len = HT16K33_FB_SIZE - first; 167 + p1 = fbdev->cache + HT16K33_FB_SIZE - 1; 168 + p2 = fbdev->buffer + HT16K33_FB_SIZE - 1; 169 + 170 + /* Determine i2c transfer length */ 171 + while (len > 1) { 172 + if (*(p1--) - *(p2--)) 173 + break; 174 + len--; 175 + } 176 + 177 + p1 = fbdev->cache + first; 178 + p2 = fbdev->buffer + first; 179 + if (!i2c_smbus_write_i2c_block_data(priv->client, first, len, p2)) 180 + memcpy(p1, p2, len); 181 + requeue: 182 + ht16k33_fb_queue(priv); 183 + } 184 + 185 + static int ht16k33_keypad_start(struct input_dev *dev) 186 + { 187 + struct ht16k33_priv *priv = input_get_drvdata(dev); 188 + struct ht16k33_keypad *keypad = &priv->keypad; 189 + 190 + /* 191 + * Schedule an immediate key scan to capture current key state; 192 + * columns will be activated and IRQs be enabled after the scan. 
193 + */ 194 + queue_delayed_work(priv->workqueue, &keypad->work, 0); 195 + return 0; 196 + } 197 + 198 + static void ht16k33_keypad_stop(struct input_dev *dev) 199 + { 200 + struct ht16k33_priv *priv = input_get_drvdata(dev); 201 + struct ht16k33_keypad *keypad = &priv->keypad; 202 + 203 + cancel_delayed_work(&keypad->work); 204 + /* 205 + * ht16k33_keypad_scan() will leave IRQs enabled; 206 + * we should disable them now. 207 + */ 208 + disable_irq_nosync(priv->client->irq); 209 + } 210 + 211 + static int ht16k33_initialize(struct ht16k33_priv *priv) 212 + { 213 + uint8_t byte; 214 + int err; 215 + uint8_t data[HT16K33_MATRIX_LED_MAX_COLS * 2]; 216 + 217 + /* Clear RAM (8 * 16 bits) */ 218 + memset(data, 0, sizeof(data)); 219 + err = i2c_smbus_write_block_data(priv->client, 0, sizeof(data), data); 220 + if (err) 221 + return err; 222 + 223 + /* Turn on internal oscillator */ 224 + byte = REG_SYSTEM_SETUP_OSC_ON | REG_SYSTEM_SETUP; 225 + err = i2c_smbus_write_byte(priv->client, byte); 226 + if (err) 227 + return err; 228 + 229 + /* Configure INT pin */ 230 + byte = REG_ROWINT_SET | REG_ROWINT_SET_INT_ACT_HIGH; 231 + if (priv->client->irq > 0) 232 + byte |= REG_ROWINT_SET_INT_EN; 233 + return i2c_smbus_write_byte(priv->client, byte); 234 + } 235 + 236 + /* 237 + * This gets the keys from keypad and reports it to input subsystem 238 + */ 239 + static void ht16k33_keypad_scan(struct work_struct *work) 240 + { 241 + struct ht16k33_keypad *keypad = 242 + container_of(work, struct ht16k33_keypad, work.work); 243 + struct ht16k33_priv *priv = 244 + container_of(keypad, struct ht16k33_priv, keypad); 245 + const unsigned short *keycodes = keypad->dev->keycode; 246 + uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS]; 247 + uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2]; 248 + int row, col, code; 249 + bool reschedule = false; 250 + 251 + if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) { 252 + dev_err(&priv->client->dev, "Failed to read 
key data\n"); 253 + goto end; 254 + } 255 + 256 + for (col = 0; col < keypad->cols; col++) { 257 + new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2]; 258 + if (new_state[col]) 259 + reschedule = true; 260 + bits_changed = keypad->last_key_state[col] ^ new_state[col]; 261 + 262 + while (bits_changed) { 263 + row = ffs(bits_changed) - 1; 264 + code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); 265 + input_event(keypad->dev, EV_MSC, MSC_SCAN, code); 266 + input_report_key(keypad->dev, keycodes[code], 267 + new_state[col] & BIT(row)); 268 + bits_changed &= ~BIT(row); 269 + } 270 + } 271 + input_sync(keypad->dev); 272 + memcpy(keypad->last_key_state, new_state, sizeof(new_state)); 273 + 274 + end: 275 + if (reschedule) 276 + ht16k33_keypad_queue(priv); 277 + else 278 + enable_irq(priv->client->irq); 279 + } 280 + 281 + static irqreturn_t ht16k33_irq_thread(int irq, void *dev) 282 + { 283 + struct ht16k33_priv *priv = dev; 284 + 285 + disable_irq_nosync(priv->client->irq); 286 + ht16k33_keypad_queue(priv); 287 + 288 + return IRQ_HANDLED; 289 + } 290 + 291 + static int ht16k33_bl_update_status(struct backlight_device *bl) 292 + { 293 + int brightness = bl->props.brightness; 294 + struct ht16k33_priv *priv = bl_get_data(bl); 295 + 296 + if (bl->props.power != FB_BLANK_UNBLANK || 297 + bl->props.fb_blank != FB_BLANK_UNBLANK || 298 + bl->props.state & BL_CORE_FBBLANK || brightness == 0) { 299 + return ht16k33_display_off(priv); 300 + } 301 + 302 + ht16k33_display_on(priv); 303 + return i2c_smbus_write_byte(priv->client, 304 + REG_BRIGHTNESS | (brightness - 1)); 305 + } 306 + 307 + static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi) 308 + { 309 + struct ht16k33_priv *priv = bl_get_data(bl); 310 + 311 + return (fi == NULL) || (fi->par == priv); 312 + } 313 + 314 + static const struct backlight_ops ht16k33_bl_ops = { 315 + .update_status = ht16k33_bl_update_status, 316 + .check_fb = ht16k33_bl_check_fb, 317 + }; 318 + 319 + static int 
ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma) 320 + { 321 + struct ht16k33_priv *priv = info->par; 322 + 323 + return vm_insert_page(vma, vma->vm_start, 324 + virt_to_page(priv->fbdev.buffer)); 325 + } 326 + 327 + static struct fb_ops ht16k33_fb_ops = { 328 + .owner = THIS_MODULE, 329 + .fb_read = fb_sys_read, 330 + .fb_write = fb_sys_write, 331 + .fb_fillrect = sys_fillrect, 332 + .fb_copyarea = sys_copyarea, 333 + .fb_imageblit = sys_imageblit, 334 + .fb_mmap = ht16k33_mmap, 335 + }; 336 + 337 + static int ht16k33_probe(struct i2c_client *client, 338 + const struct i2c_device_id *id) 339 + { 340 + int err; 341 + uint32_t rows, cols, dft_brightness; 342 + struct backlight_device *bl; 343 + struct backlight_properties bl_props; 344 + struct ht16k33_priv *priv; 345 + struct ht16k33_keypad *keypad; 346 + struct ht16k33_fbdev *fbdev; 347 + struct device_node *node = client->dev.of_node; 348 + 349 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { 350 + dev_err(&client->dev, "i2c_check_functionality error\n"); 351 + return -EIO; 352 + } 353 + 354 + if (client->irq <= 0) { 355 + dev_err(&client->dev, "No IRQ specified\n"); 356 + return -EINVAL; 357 + } 358 + 359 + priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); 360 + if (!priv) 361 + return -ENOMEM; 362 + 363 + priv->client = client; 364 + i2c_set_clientdata(client, priv); 365 + fbdev = &priv->fbdev; 366 + keypad = &priv->keypad; 367 + 368 + priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq"); 369 + if (priv->workqueue == NULL) 370 + return -ENOMEM; 371 + 372 + err = ht16k33_initialize(priv); 373 + if (err) 374 + goto err_destroy_wq; 375 + 376 + /* Framebuffer (2 bytes per column) */ 377 + BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE); 378 + fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL); 379 + if (!fbdev->buffer) { 380 + err = -ENOMEM; 381 + goto err_free_fbdev; 382 + } 383 + 384 + fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, 
GFP_KERNEL); 385 + if (!fbdev->cache) { 386 + err = -ENOMEM; 387 + goto err_fbdev_buffer; 388 + } 389 + 390 + fbdev->info = framebuffer_alloc(0, &client->dev); 391 + if (!fbdev->info) { 392 + err = -ENOMEM; 393 + goto err_fbdev_buffer; 394 + } 395 + 396 + err = of_property_read_u32(node, "refresh-rate-hz", 397 + &fbdev->refresh_rate); 398 + if (err) { 399 + dev_err(&client->dev, "refresh rate not specified\n"); 400 + goto err_fbdev_info; 401 + } 402 + fb_bl_default_curve(fbdev->info, 0, MIN_BRIGHTNESS, MAX_BRIGHTNESS); 403 + 404 + INIT_DELAYED_WORK(&fbdev->work, ht16k33_fb_update); 405 + fbdev->info->fbops = &ht16k33_fb_ops; 406 + fbdev->info->screen_base = (char __iomem *) fbdev->buffer; 407 + fbdev->info->screen_size = HT16K33_FB_SIZE; 408 + fbdev->info->fix = ht16k33_fb_fix; 409 + fbdev->info->var = ht16k33_fb_var; 410 + fbdev->info->pseudo_palette = NULL; 411 + fbdev->info->flags = FBINFO_FLAG_DEFAULT; 412 + fbdev->info->par = priv; 413 + 414 + err = register_framebuffer(fbdev->info); 415 + if (err) 416 + goto err_fbdev_info; 417 + 418 + /* Keypad */ 419 + keypad->dev = devm_input_allocate_device(&client->dev); 420 + if (!keypad->dev) { 421 + err = -ENOMEM; 422 + goto err_fbdev_unregister; 423 + } 424 + 425 + keypad->dev->name = DRIVER_NAME"-keypad"; 426 + keypad->dev->id.bustype = BUS_I2C; 427 + keypad->dev->open = ht16k33_keypad_start; 428 + keypad->dev->close = ht16k33_keypad_stop; 429 + 430 + if (!of_get_property(node, "linux,no-autorepeat", NULL)) 431 + __set_bit(EV_REP, keypad->dev->evbit); 432 + 433 + err = of_property_read_u32(node, "debounce-delay-ms", 434 + &keypad->debounce_ms); 435 + if (err) { 436 + dev_err(&client->dev, "key debounce delay not specified\n"); 437 + goto err_fbdev_unregister; 438 + } 439 + 440 + err = devm_request_threaded_irq(&client->dev, client->irq, NULL, 441 + ht16k33_irq_thread, 442 + IRQF_TRIGGER_RISING | IRQF_ONESHOT, 443 + DRIVER_NAME, priv); 444 + if (err) { 445 + dev_err(&client->dev, "irq request failed %d, error %d\n", 
446 + client->irq, err); 447 + goto err_fbdev_unregister; 448 + } 449 + 450 + disable_irq_nosync(client->irq); 451 + rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS; 452 + cols = HT16K33_MATRIX_KEYPAD_MAX_COLS; 453 + err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols); 454 + if (err) 455 + goto err_fbdev_unregister; 456 + 457 + err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL, 458 + keypad->dev); 459 + if (err) { 460 + dev_err(&client->dev, "failed to build keymap\n"); 461 + goto err_fbdev_unregister; 462 + } 463 + 464 + input_set_drvdata(keypad->dev, priv); 465 + keypad->rows = rows; 466 + keypad->cols = cols; 467 + keypad->row_shift = get_count_order(cols); 468 + INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan); 469 + 470 + err = input_register_device(keypad->dev); 471 + if (err) 472 + goto err_fbdev_unregister; 473 + 474 + /* Backlight */ 475 + memset(&bl_props, 0, sizeof(struct backlight_properties)); 476 + bl_props.type = BACKLIGHT_RAW; 477 + bl_props.max_brightness = MAX_BRIGHTNESS; 478 + 479 + bl = devm_backlight_device_register(&client->dev, DRIVER_NAME"-bl", 480 + &client->dev, priv, 481 + &ht16k33_bl_ops, &bl_props); 482 + if (IS_ERR(bl)) { 483 + dev_err(&client->dev, "failed to register backlight\n"); 484 + err = PTR_ERR(bl); 485 + goto err_keypad_unregister; 486 + } 487 + 488 + err = of_property_read_u32(node, "default-brightness-level", 489 + &dft_brightness); 490 + if (err) { 491 + dft_brightness = MAX_BRIGHTNESS; 492 + } else if (dft_brightness > MAX_BRIGHTNESS) { 493 + dev_warn(&client->dev, 494 + "invalid default brightness level: %u, using %u\n", 495 + dft_brightness, MAX_BRIGHTNESS); 496 + dft_brightness = MAX_BRIGHTNESS; 497 + } 498 + 499 + bl->props.brightness = dft_brightness; 500 + ht16k33_bl_update_status(bl); 501 + 502 + ht16k33_fb_queue(priv); 503 + return 0; 504 + 505 + err_keypad_unregister: 506 + input_unregister_device(keypad->dev); 507 + err_fbdev_unregister: 508 + unregister_framebuffer(fbdev->info); 509 + 
err_fbdev_info: 510 + framebuffer_release(fbdev->info); 511 + err_fbdev_buffer: 512 + free_page((unsigned long) fbdev->buffer); 513 + err_free_fbdev: 514 + kfree(fbdev); 515 + err_destroy_wq: 516 + destroy_workqueue(priv->workqueue); 517 + 518 + return err; 519 + } 520 + 521 + static int ht16k33_remove(struct i2c_client *client) 522 + { 523 + struct ht16k33_priv *priv = i2c_get_clientdata(client); 524 + struct ht16k33_keypad *keypad = &priv->keypad; 525 + struct ht16k33_fbdev *fbdev = &priv->fbdev; 526 + 527 + ht16k33_keypad_stop(keypad->dev); 528 + 529 + cancel_delayed_work(&fbdev->work); 530 + unregister_framebuffer(fbdev->info); 531 + framebuffer_release(fbdev->info); 532 + free_page((unsigned long) fbdev->buffer); 533 + 534 + destroy_workqueue(priv->workqueue); 535 + return 0; 536 + } 537 + 538 + static const struct i2c_device_id ht16k33_i2c_match[] = { 539 + { "ht16k33", 0 }, 540 + { } 541 + }; 542 + MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match); 543 + 544 + static const struct of_device_id ht16k33_of_match[] = { 545 + { .compatible = "holtek,ht16k33", }, 546 + { } 547 + }; 548 + MODULE_DEVICE_TABLE(of, ht16k33_of_match); 549 + 550 + static struct i2c_driver ht16k33_driver = { 551 + .probe = ht16k33_probe, 552 + .remove = ht16k33_remove, 553 + .driver = { 554 + .name = DRIVER_NAME, 555 + .of_match_table = of_match_ptr(ht16k33_of_match), 556 + }, 557 + .id_table = ht16k33_i2c_match, 558 + }; 559 + module_i2c_driver(ht16k33_driver); 560 + 561 + MODULE_DESCRIPTION("Holtek HT16K33 driver"); 562 + MODULE_LICENSE("GPL"); 563 + MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
+1 -2
drivers/char/Kconfig
··· 17 17 18 18 config DEVKMEM 19 19 bool "/dev/kmem virtual device support" 20 - default y 21 20 help 22 21 Say Y here if you want to support the /dev/kmem device. The 23 22 /dev/kmem device is rarely used, but can be used for certain ··· 578 579 source "drivers/s390/char/Kconfig" 579 580 580 581 config TILE_SROM 581 - bool "Character-device access via hypervisor to the Tilera SPI ROM" 582 + tristate "Character-device access via hypervisor to the Tilera SPI ROM" 582 583 depends on TILE 583 584 default y 584 585 ---help---
+11
drivers/char/pcmcia/Kconfig
··· 43 43 (http://www.omnikey.com/), or a current development version of OpenCT 44 44 (http://www.opensc-project.org/opensc). 45 45 46 + config SCR24X 47 + tristate "SCR24x Chip Card Interface support" 48 + depends on PCMCIA 49 + help 50 + Enable support for the SCR24x PCMCIA Chip Card Interface. 51 + 52 + To compile this driver as a module, choose M here. 53 + The module will be called scr24x_cs.. 54 + 55 + If unsure say N. 56 + 46 57 config IPWIRELESS 47 58 tristate "IPWireless 3G UMTS PCMCIA card support" 48 59 depends on PCMCIA && NETDEVICES && TTY
+1
drivers/char/pcmcia/Makefile
··· 7 7 obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o 8 8 obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o 9 9 obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o 10 + obj-$(CONFIG_SCR24X) += scr24x_cs.o
+373
drivers/char/pcmcia/scr24x_cs.c
··· 1 + /* 2 + * SCR24x PCMCIA Smart Card Reader Driver 3 + * 4 + * Copyright (C) 2005-2006 TL Sudheendran 5 + * Copyright (C) 2016 Lubomir Rintel 6 + * 7 + * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran. 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2, or (at your option) 12 + * any later version. 13 + * 14 + * This program is distributed in the hope that it will be useful, 15 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + * GNU General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this program; see the file COPYING. If not, write to 21 + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 22 + */ 23 + 24 + #include <linux/device.h> 25 + #include <linux/module.h> 26 + #include <linux/delay.h> 27 + #include <linux/cdev.h> 28 + #include <linux/slab.h> 29 + #include <linux/fs.h> 30 + #include <linux/io.h> 31 + #include <linux/uaccess.h> 32 + 33 + #include <pcmcia/cistpl.h> 34 + #include <pcmcia/ds.h> 35 + 36 + #define CCID_HEADER_SIZE 10 37 + #define CCID_LENGTH_OFFSET 1 38 + #define CCID_MAX_LEN 271 39 + 40 + #define SCR24X_DATA(n) (1 + n) 41 + #define SCR24X_CMD_STATUS 7 42 + #define CMD_START 0x40 43 + #define CMD_WRITE_BYTE 0x41 44 + #define CMD_READ_BYTE 0x42 45 + #define STATUS_BUSY 0x80 46 + 47 + struct scr24x_dev { 48 + struct device *dev; 49 + struct cdev c_dev; 50 + unsigned char buf[CCID_MAX_LEN]; 51 + int devno; 52 + struct mutex lock; 53 + struct kref refcnt; 54 + u8 __iomem *regs; 55 + }; 56 + 57 + #define SCR24X_DEVS 8 58 + static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS); 59 + 60 + static struct class *scr24x_class; 61 + static dev_t scr24x_devt; 62 + 63 + static void 
scr24x_delete(struct kref *kref) 64 + { 65 + struct scr24x_dev *dev = container_of(kref, struct scr24x_dev, 66 + refcnt); 67 + 68 + kfree(dev); 69 + } 70 + 71 + static int scr24x_wait_ready(struct scr24x_dev *dev) 72 + { 73 + u_char status; 74 + int timeout = 100; 75 + 76 + do { 77 + status = ioread8(dev->regs + SCR24X_CMD_STATUS); 78 + if (!(status & STATUS_BUSY)) 79 + return 0; 80 + 81 + msleep(20); 82 + } while (--timeout); 83 + 84 + return -EIO; 85 + } 86 + 87 + static int scr24x_open(struct inode *inode, struct file *filp) 88 + { 89 + struct scr24x_dev *dev = container_of(inode->i_cdev, 90 + struct scr24x_dev, c_dev); 91 + 92 + kref_get(&dev->refcnt); 93 + filp->private_data = dev; 94 + 95 + return nonseekable_open(inode, filp); 96 + } 97 + 98 + static int scr24x_release(struct inode *inode, struct file *filp) 99 + { 100 + struct scr24x_dev *dev = filp->private_data; 101 + 102 + /* We must not take the dev->lock here as scr24x_delete() 103 + * might be called to remove the dev structure altogether. 104 + * We don't need the lock anyway, since after the reference 105 + * acquired in probe() is released in remove() the chrdev 106 + * is already unregistered and noone can possibly acquire 107 + * a reference via open() anymore. 
*/ 108 + kref_put(&dev->refcnt, scr24x_delete); 109 + return 0; 110 + } 111 + 112 + static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit) 113 + { 114 + size_t i, y; 115 + int ret; 116 + 117 + for (i = offset; i < limit; i += 5) { 118 + iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS); 119 + ret = scr24x_wait_ready(dev); 120 + if (ret < 0) 121 + return ret; 122 + 123 + for (y = 0; y < 5 && i + y < limit; y++) 124 + dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y)); 125 + } 126 + 127 + return 0; 128 + } 129 + 130 + static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count, 131 + loff_t *ppos) 132 + { 133 + struct scr24x_dev *dev = filp->private_data; 134 + int ret; 135 + int len; 136 + 137 + if (count < CCID_HEADER_SIZE) 138 + return -EINVAL; 139 + 140 + if (mutex_lock_interruptible(&dev->lock)) 141 + return -ERESTARTSYS; 142 + 143 + if (!dev->dev) { 144 + ret = -ENODEV; 145 + goto out; 146 + } 147 + 148 + ret = scr24x_wait_ready(dev); 149 + if (ret < 0) 150 + goto out; 151 + len = CCID_HEADER_SIZE; 152 + ret = read_chunk(dev, 0, len); 153 + if (ret < 0) 154 + goto out; 155 + 156 + len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET])); 157 + if (len > sizeof(dev->buf)) { 158 + ret = -EIO; 159 + goto out; 160 + } 161 + ret = read_chunk(dev, CCID_HEADER_SIZE, len); 162 + if (ret < 0) 163 + goto out; 164 + 165 + if (len < count) 166 + count = len; 167 + 168 + if (copy_to_user(buf, dev->buf, count)) { 169 + ret = -EFAULT; 170 + goto out; 171 + } 172 + 173 + ret = count; 174 + out: 175 + mutex_unlock(&dev->lock); 176 + return ret; 177 + } 178 + 179 + static ssize_t scr24x_write(struct file *filp, const char __user *buf, 180 + size_t count, loff_t *ppos) 181 + { 182 + struct scr24x_dev *dev = filp->private_data; 183 + size_t i, y; 184 + int ret; 185 + 186 + if (mutex_lock_interruptible(&dev->lock)) 187 + return -ERESTARTSYS; 188 + 189 + if (!dev->dev) { 190 + ret = -ENODEV; 191 + goto out; 192 + } 193 + 194 + if 
(count > sizeof(dev->buf)) { 195 + ret = -EINVAL; 196 + goto out; 197 + } 198 + 199 + if (copy_from_user(dev->buf, buf, count)) { 200 + ret = -EFAULT; 201 + goto out; 202 + } 203 + 204 + ret = scr24x_wait_ready(dev); 205 + if (ret < 0) 206 + goto out; 207 + 208 + iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS); 209 + ret = scr24x_wait_ready(dev); 210 + if (ret < 0) 211 + goto out; 212 + 213 + for (i = 0; i < count; i += 5) { 214 + for (y = 0; y < 5 && i + y < count; y++) 215 + iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y)); 216 + 217 + iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS); 218 + ret = scr24x_wait_ready(dev); 219 + if (ret < 0) 220 + goto out; 221 + } 222 + 223 + ret = count; 224 + out: 225 + mutex_unlock(&dev->lock); 226 + return ret; 227 + } 228 + 229 + static const struct file_operations scr24x_fops = { 230 + .owner = THIS_MODULE, 231 + .read = scr24x_read, 232 + .write = scr24x_write, 233 + .open = scr24x_open, 234 + .release = scr24x_release, 235 + .llseek = no_llseek, 236 + }; 237 + 238 + static int scr24x_config_check(struct pcmcia_device *link, void *priv_data) 239 + { 240 + if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11) 241 + return -ENODEV; 242 + return pcmcia_request_io(link); 243 + } 244 + 245 + static int scr24x_probe(struct pcmcia_device *link) 246 + { 247 + struct scr24x_dev *dev; 248 + int ret; 249 + 250 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 251 + if (!dev) 252 + return -ENOMEM; 253 + 254 + dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS); 255 + if (dev->devno >= SCR24X_DEVS) { 256 + ret = -EBUSY; 257 + goto err; 258 + } 259 + 260 + mutex_init(&dev->lock); 261 + kref_init(&dev->refcnt); 262 + 263 + link->priv = dev; 264 + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; 265 + 266 + ret = pcmcia_loop_config(link, scr24x_config_check, NULL); 267 + if (ret < 0) 268 + goto err; 269 + 270 + dev->dev = &link->dev; 271 + dev->regs = devm_ioport_map(&link->dev, 272 + 
link->resource[PCMCIA_IOPORT_0]->start, 273 + resource_size(link->resource[PCMCIA_IOPORT_0])); 274 + if (!dev->regs) { 275 + ret = -EIO; 276 + goto err; 277 + } 278 + 279 + cdev_init(&dev->c_dev, &scr24x_fops); 280 + dev->c_dev.owner = THIS_MODULE; 281 + dev->c_dev.ops = &scr24x_fops; 282 + ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1); 283 + if (ret < 0) 284 + goto err; 285 + 286 + ret = pcmcia_enable_device(link); 287 + if (ret < 0) { 288 + pcmcia_disable_device(link); 289 + goto err; 290 + } 291 + 292 + device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno), 293 + NULL, "scr24x%d", dev->devno); 294 + 295 + dev_info(&link->dev, "SCR24x Chip Card Interface\n"); 296 + return 0; 297 + 298 + err: 299 + if (dev->devno < SCR24X_DEVS) 300 + clear_bit(dev->devno, scr24x_minors); 301 + kfree (dev); 302 + return ret; 303 + } 304 + 305 + static void scr24x_remove(struct pcmcia_device *link) 306 + { 307 + struct scr24x_dev *dev = (struct scr24x_dev *)link->priv; 308 + 309 + device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno)); 310 + mutex_lock(&dev->lock); 311 + pcmcia_disable_device(link); 312 + cdev_del(&dev->c_dev); 313 + clear_bit(dev->devno, scr24x_minors); 314 + dev->dev = NULL; 315 + mutex_unlock(&dev->lock); 316 + 317 + kref_put(&dev->refcnt, scr24x_delete); 318 + } 319 + 320 + static const struct pcmcia_device_id scr24x_ids[] = { 321 + PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader", 322 + 0x53cb94f9, 0xbfdf89a5), 323 + PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3), 324 + PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de), 325 + PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665), 326 + PCMCIA_DEVICE_NULL 327 + }; 328 + MODULE_DEVICE_TABLE(pcmcia, scr24x_ids); 329 + 330 + static struct pcmcia_driver scr24x_driver = { 331 + .owner = THIS_MODULE, 332 + .name = "scr24x_cs", 333 + .probe = scr24x_probe, 334 + .remove = scr24x_remove, 335 + .id_table = scr24x_ids, 336 + }; 337 + 338 + static int 
__init scr24x_init(void) 339 + { 340 + int ret; 341 + 342 + scr24x_class = class_create(THIS_MODULE, "scr24x"); 343 + if (IS_ERR(scr24x_class)) 344 + return PTR_ERR(scr24x_class); 345 + 346 + ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x"); 347 + if (ret < 0) { 348 + class_destroy(scr24x_class); 349 + return ret; 350 + } 351 + 352 + ret = pcmcia_register_driver(&scr24x_driver); 353 + if (ret < 0) { 354 + unregister_chrdev_region(scr24x_devt, SCR24X_DEVS); 355 + class_destroy(scr24x_class); 356 + } 357 + 358 + return ret; 359 + } 360 + 361 + static void __exit scr24x_exit(void) 362 + { 363 + pcmcia_unregister_driver(&scr24x_driver); 364 + unregister_chrdev_region(scr24x_devt, SCR24X_DEVS); 365 + class_destroy(scr24x_class); 366 + } 367 + 368 + module_init(scr24x_init); 369 + module_exit(scr24x_exit); 370 + 371 + MODULE_AUTHOR("Lubomir Rintel"); 372 + MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver"); 373 + MODULE_LICENSE("GPL");
+27 -9
drivers/char/ppdev.c
··· 86 86 long default_inactivity; 87 87 }; 88 88 89 + /* should we use PARDEVICE_MAX here? */ 90 + static struct device *devices[PARPORT_MAX]; 91 + 89 92 /* pp_struct.flags bitfields */ 90 93 #define PP_CLAIMED (1<<0) 91 94 #define PP_EXCL (1<<1) ··· 297 294 298 295 port = parport_find_number(minor); 299 296 if (!port) { 300 - printk(KERN_WARNING "%s: no associated port!\n", name); 297 + pr_warn("%s: no associated port!\n", name); 301 298 kfree(name); 302 299 return -ENXIO; 303 300 } ··· 308 305 ppdev_cb.private = pp; 309 306 pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); 310 307 parport_put_port(port); 308 + kfree(name); 311 309 312 310 if (!pdev) { 313 - printk(KERN_WARNING "%s: failed to register device!\n", name); 314 - kfree(name); 311 + pr_warn("%s: failed to register device!\n", name); 315 312 return -ENXIO; 316 313 } 317 314 ··· 792 789 793 790 static void pp_attach(struct parport *port) 794 791 { 795 - device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), 796 - NULL, "parport%d", port->number); 792 + struct device *ret; 793 + 794 + if (devices[port->number]) 795 + return; 796 + 797 + ret = device_create(ppdev_class, port->dev, 798 + MKDEV(PP_MAJOR, port->number), NULL, 799 + "parport%d", port->number); 800 + if (IS_ERR(ret)) { 801 + pr_err("Failed to create device parport%d\n", 802 + port->number); 803 + return; 804 + } 805 + devices[port->number] = ret; 797 806 } 798 807 799 808 static void pp_detach(struct parport *port) 800 809 { 810 + if (!devices[port->number]) 811 + return; 812 + 801 813 device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); 814 + devices[port->number] = NULL; 802 815 } 803 816 804 817 static int pp_probe(struct pardevice *par_dev) ··· 841 822 int err = 0; 842 823 843 824 if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) { 844 - printk(KERN_WARNING CHRDEV ": unable to get major %d\n", 845 - PP_MAJOR); 825 + pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR); 846 826 return -EIO; 847 827 } 848 
828 ppdev_class = class_create(THIS_MODULE, CHRDEV); ··· 851 833 } 852 834 err = parport_register_driver(&pp_driver); 853 835 if (err < 0) { 854 - printk(KERN_WARNING CHRDEV ": unable to register with parport\n"); 836 + pr_warn(CHRDEV ": unable to register with parport\n"); 855 837 goto out_class; 856 838 } 857 839 858 - printk(KERN_INFO PP_VERSION "\n"); 840 + pr_info(PP_VERSION "\n"); 859 841 goto out; 860 842 861 843 out_class:
+1 -1
drivers/char/snsc.c
··· 285 285 DECLARE_WAITQUEUE(wait, current); 286 286 287 287 if (file->f_flags & O_NONBLOCK) { 288 - spin_unlock(&sd->sd_wlock); 288 + spin_unlock_irqrestore(&sd->sd_wlock, flags); 289 289 up(&sd->sd_wbs); 290 290 return -EAGAIN; 291 291 }
+2 -1
drivers/char/tile-srom.c
··· 312 312 313 313 static char *srom_devnode(struct device *dev, umode_t *mode) 314 314 { 315 - *mode = S_IRUGO | S_IWUSR; 315 + if (mode) 316 + *mode = 0644; 316 317 return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev)); 317 318 } 318 319
+38 -1
drivers/fpga/Kconfig
··· 13 13 14 14 if FPGA 15 15 16 + config FPGA_REGION 17 + tristate "FPGA Region" 18 + depends on OF && FPGA_BRIDGE 19 + help 20 + FPGA Regions allow loading FPGA images under control of 21 + the Device Tree. 22 + 16 23 config FPGA_MGR_SOCFPGA 17 24 tristate "Altera SOCFPGA FPGA Manager" 18 - depends on ARCH_SOCFPGA 25 + depends on ARCH_SOCFPGA || COMPILE_TEST 19 26 help 20 27 FPGA manager driver support for Altera SOCFPGA. 28 + 29 + config FPGA_MGR_SOCFPGA_A10 30 + tristate "Altera SoCFPGA Arria10" 31 + depends on ARCH_SOCFPGA || COMPILE_TEST 32 + select REGMAP_MMIO 33 + help 34 + FPGA manager driver support for Altera Arria10 SoCFPGA. 21 35 22 36 config FPGA_MGR_ZYNQ_FPGA 23 37 tristate "Xilinx Zynq FPGA" ··· 39 25 depends on HAS_DMA 40 26 help 41 27 FPGA manager driver support for Xilinx Zynq FPGAs. 28 + 29 + config FPGA_BRIDGE 30 + tristate "FPGA Bridge Framework" 31 + depends on OF 32 + help 33 + Say Y here if you want to support bridges connected between host 34 + processors and FPGAs or between FPGAs. 35 + 36 + config SOCFPGA_FPGA_BRIDGE 37 + tristate "Altera SoCFPGA FPGA Bridges" 38 + depends on ARCH_SOCFPGA && FPGA_BRIDGE 39 + help 40 + Say Y to enable drivers for FPGA bridges for Altera SOCFPGA 41 + devices. 42 + 43 + config ALTERA_FREEZE_BRIDGE 44 + tristate "Altera FPGA Freeze Bridge" 45 + depends on ARCH_SOCFPGA && FPGA_BRIDGE 46 + help 47 + Say Y to enable drivers for Altera FPGA Freeze bridges. A 48 + freeze bridge is a bridge that exists in the FPGA fabric to 49 + isolate one region of the FPGA from the busses while that 50 + region is being reprogrammed. 42 51 43 52 endif # FPGA 44 53
+9
drivers/fpga/Makefile
··· 7 7 8 8 # FPGA Manager Drivers 9 9 obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o 10 + obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o 10 11 obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o 12 + 13 + # FPGA Bridge Drivers 14 + obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o 15 + obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o 16 + obj-$(CONFIG_ALTERA_FREEZE_BRIDGE) += altera-freeze-bridge.o 17 + 18 + # High Level Interfaces 19 + obj-$(CONFIG_FPGA_REGION) += fpga-region.o
+180
drivers/fpga/altera-fpga2sdram.c
··· 1 + /* 2 + * FPGA to SDRAM Bridge Driver for Altera SoCFPGA Devices 3 + * 4 + * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + /* 20 + * This driver manages a bridge between an FPGA and the SDRAM used by the ARM 21 + * host processor system (HPS). 22 + * 23 + * The bridge contains 4 read ports, 4 write ports, and 6 command ports. 24 + * Reconfiguring these ports requires that no SDRAM transactions occur during 25 + * reconfiguration. The code reconfiguring the ports cannot run out of SDRAM 26 + * nor can the FPGA access the SDRAM during reconfiguration. This driver does 27 + * not support reconfiguring the ports. The ports are configured by code 28 + * running out of on chip ram before Linux is started and the configuration 29 + * is passed in a handoff register in the system manager. 30 + * 31 + * This driver supports enabling and disabling of the configured ports, which 32 + * allows for safe reprogramming of the FPGA, assuming that the new FPGA image 33 + * uses the same port configuration. Bridges must be disabled before 34 + * reprogramming the FPGA and re-enabled after the FPGA has been programmed. 
35 + */ 36 + 37 + #include <linux/fpga/fpga-bridge.h> 38 + #include <linux/kernel.h> 39 + #include <linux/mfd/syscon.h> 40 + #include <linux/module.h> 41 + #include <linux/of_platform.h> 42 + #include <linux/regmap.h> 43 + 44 + #define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80 45 + #define ALT_SDR_CTL_FPGAPORTRST_PORTRSTN_MSK 0x00003fff 46 + #define ALT_SDR_CTL_FPGAPORTRST_RD_SHIFT 0 47 + #define ALT_SDR_CTL_FPGAPORTRST_WR_SHIFT 4 48 + #define ALT_SDR_CTL_FPGAPORTRST_CTRL_SHIFT 8 49 + 50 + /* 51 + * From the Cyclone V HPS Memory Map document: 52 + * These registers are used to store handoff information between the 53 + * preloader and the OS. These 8 registers can be used to store any 54 + * information. The contents of these registers have no impact on 55 + * the state of the HPS hardware. 56 + */ 57 + #define SYSMGR_ISWGRP_HANDOFF3 (0x8C) 58 + 59 + #define F2S_BRIDGE_NAME "fpga2sdram" 60 + 61 + struct alt_fpga2sdram_data { 62 + struct device *dev; 63 + struct regmap *sdrctl; 64 + int mask; 65 + }; 66 + 67 + static int alt_fpga2sdram_enable_show(struct fpga_bridge *bridge) 68 + { 69 + struct alt_fpga2sdram_data *priv = bridge->priv; 70 + int value; 71 + 72 + regmap_read(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST, &value); 73 + 74 + return (value & priv->mask) == priv->mask; 75 + } 76 + 77 + static inline int _alt_fpga2sdram_enable_set(struct alt_fpga2sdram_data *priv, 78 + bool enable) 79 + { 80 + return regmap_update_bits(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST, 81 + priv->mask, enable ? 
priv->mask : 0); 82 + } 83 + 84 + static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable) 85 + { 86 + return _alt_fpga2sdram_enable_set(bridge->priv, enable); 87 + } 88 + 89 + struct prop_map { 90 + char *prop_name; 91 + u32 *prop_value; 92 + u32 prop_max; 93 + }; 94 + 95 + static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = { 96 + .enable_set = alt_fpga2sdram_enable_set, 97 + .enable_show = alt_fpga2sdram_enable_show, 98 + }; 99 + 100 + static const struct of_device_id altera_fpga_of_match[] = { 101 + { .compatible = "altr,socfpga-fpga2sdram-bridge" }, 102 + {}, 103 + }; 104 + 105 + static int alt_fpga_bridge_probe(struct platform_device *pdev) 106 + { 107 + struct device *dev = &pdev->dev; 108 + struct alt_fpga2sdram_data *priv; 109 + u32 enable; 110 + struct regmap *sysmgr; 111 + int ret = 0; 112 + 113 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 114 + if (!priv) 115 + return -ENOMEM; 116 + 117 + priv->dev = dev; 118 + 119 + priv->sdrctl = syscon_regmap_lookup_by_compatible("altr,sdr-ctl"); 120 + if (IS_ERR(priv->sdrctl)) { 121 + dev_err(dev, "regmap for altr,sdr-ctl lookup failed.\n"); 122 + return PTR_ERR(priv->sdrctl); 123 + } 124 + 125 + sysmgr = syscon_regmap_lookup_by_compatible("altr,sys-mgr"); 126 + if (IS_ERR(sysmgr)) { 127 + dev_err(dev, "regmap for altr,sys-mgr lookup failed.\n"); 128 + return PTR_ERR(sysmgr); 129 + } 130 + 131 + /* Get f2s bridge configuration saved in handoff register */ 132 + regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask); 133 + 134 + ret = fpga_bridge_register(dev, F2S_BRIDGE_NAME, 135 + &altera_fpga2sdram_br_ops, priv); 136 + if (ret) 137 + return ret; 138 + 139 + dev_info(dev, "driver initialized with handoff %08x\n", priv->mask); 140 + 141 + if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) { 142 + if (enable > 1) { 143 + dev_warn(dev, "invalid bridge-enable %u > 1\n", enable); 144 + } else { 145 + dev_info(dev, "%s bridge\n", 146 + (enable ? 
"enabling" : "disabling")); 147 + ret = _alt_fpga2sdram_enable_set(priv, enable); 148 + if (ret) { 149 + fpga_bridge_unregister(&pdev->dev); 150 + return ret; 151 + } 152 + } 153 + } 154 + 155 + return ret; 156 + } 157 + 158 + static int alt_fpga_bridge_remove(struct platform_device *pdev) 159 + { 160 + fpga_bridge_unregister(&pdev->dev); 161 + 162 + return 0; 163 + } 164 + 165 + MODULE_DEVICE_TABLE(of, altera_fpga_of_match); 166 + 167 + static struct platform_driver altera_fpga_driver = { 168 + .probe = alt_fpga_bridge_probe, 169 + .remove = alt_fpga_bridge_remove, 170 + .driver = { 171 + .name = "altera_fpga2sdram_bridge", 172 + .of_match_table = of_match_ptr(altera_fpga_of_match), 173 + }, 174 + }; 175 + 176 + module_platform_driver(altera_fpga_driver); 177 + 178 + MODULE_DESCRIPTION("Altera SoCFPGA FPGA to SDRAM Bridge"); 179 + MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>"); 180 + MODULE_LICENSE("GPL v2");
+273
drivers/fpga/altera-freeze-bridge.c
··· 1 + /* 2 + * FPGA Freeze Bridge Controller 3 + * 4 + * Copyright (C) 2016 Altera Corporation. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include <linux/delay.h> 19 + #include <linux/io.h> 20 + #include <linux/kernel.h> 21 + #include <linux/of_device.h> 22 + #include <linux/module.h> 23 + #include <linux/fpga/fpga-bridge.h> 24 + 25 + #define FREEZE_CSR_STATUS_OFFSET 0 26 + #define FREEZE_CSR_CTRL_OFFSET 4 27 + #define FREEZE_CSR_ILLEGAL_REQ_OFFSET 8 28 + #define FREEZE_CSR_REG_VERSION 12 29 + 30 + #define FREEZE_CSR_SUPPORTED_VERSION 2 31 + 32 + #define FREEZE_CSR_STATUS_FREEZE_REQ_DONE BIT(0) 33 + #define FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE BIT(1) 34 + 35 + #define FREEZE_CSR_CTRL_FREEZE_REQ BIT(0) 36 + #define FREEZE_CSR_CTRL_RESET_REQ BIT(1) 37 + #define FREEZE_CSR_CTRL_UNFREEZE_REQ BIT(2) 38 + 39 + #define FREEZE_BRIDGE_NAME "freeze" 40 + 41 + struct altera_freeze_br_data { 42 + struct device *dev; 43 + void __iomem *base_addr; 44 + bool enable; 45 + }; 46 + 47 + /* 48 + * Poll status until status bit is set or we have a timeout. 
49 + */ 50 + static int altera_freeze_br_req_ack(struct altera_freeze_br_data *priv, 51 + u32 timeout, u32 req_ack) 52 + { 53 + struct device *dev = priv->dev; 54 + void __iomem *csr_illegal_req_addr = priv->base_addr + 55 + FREEZE_CSR_ILLEGAL_REQ_OFFSET; 56 + u32 status, illegal, ctrl; 57 + int ret = -ETIMEDOUT; 58 + 59 + do { 60 + illegal = readl(csr_illegal_req_addr); 61 + if (illegal) { 62 + dev_err(dev, "illegal request detected 0x%x", illegal); 63 + 64 + writel(1, csr_illegal_req_addr); 65 + 66 + illegal = readl(csr_illegal_req_addr); 67 + if (illegal) 68 + dev_err(dev, "illegal request not cleared 0x%x", 69 + illegal); 70 + 71 + ret = -EINVAL; 72 + break; 73 + } 74 + 75 + status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET); 76 + dev_dbg(dev, "%s %x %x\n", __func__, status, req_ack); 77 + status &= req_ack; 78 + if (status) { 79 + ctrl = readl(priv->base_addr + FREEZE_CSR_CTRL_OFFSET); 80 + dev_dbg(dev, "%s request %x acknowledged %x %x\n", 81 + __func__, req_ack, status, ctrl); 82 + ret = 0; 83 + break; 84 + } 85 + 86 + udelay(1); 87 + } while (timeout--); 88 + 89 + if (ret == -ETIMEDOUT) 90 + dev_err(dev, "%s timeout waiting for 0x%x\n", 91 + __func__, req_ack); 92 + 93 + return ret; 94 + } 95 + 96 + static int altera_freeze_br_do_freeze(struct altera_freeze_br_data *priv, 97 + u32 timeout) 98 + { 99 + struct device *dev = priv->dev; 100 + void __iomem *csr_ctrl_addr = priv->base_addr + 101 + FREEZE_CSR_CTRL_OFFSET; 102 + u32 status; 103 + int ret; 104 + 105 + status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET); 106 + 107 + dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr)); 108 + 109 + if (status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE) { 110 + dev_dbg(dev, "%s bridge already disabled %d\n", 111 + __func__, status); 112 + return 0; 113 + } else if (!(status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)) { 114 + dev_err(dev, "%s bridge not enabled %d\n", __func__, status); 115 + return -EINVAL; 116 + } 117 + 118 + 
writel(FREEZE_CSR_CTRL_FREEZE_REQ, csr_ctrl_addr); 119 + 120 + ret = altera_freeze_br_req_ack(priv, timeout, 121 + FREEZE_CSR_STATUS_FREEZE_REQ_DONE); 122 + 123 + if (ret) 124 + writel(0, csr_ctrl_addr); 125 + else 126 + writel(FREEZE_CSR_CTRL_RESET_REQ, csr_ctrl_addr); 127 + 128 + return ret; 129 + } 130 + 131 + static int altera_freeze_br_do_unfreeze(struct altera_freeze_br_data *priv, 132 + u32 timeout) 133 + { 134 + struct device *dev = priv->dev; 135 + void __iomem *csr_ctrl_addr = priv->base_addr + 136 + FREEZE_CSR_CTRL_OFFSET; 137 + u32 status; 138 + int ret; 139 + 140 + writel(0, csr_ctrl_addr); 141 + 142 + status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET); 143 + 144 + dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr)); 145 + 146 + if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE) { 147 + dev_dbg(dev, "%s bridge already enabled %d\n", 148 + __func__, status); 149 + return 0; 150 + } else if (!(status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE)) { 151 + dev_err(dev, "%s bridge not frozen %d\n", __func__, status); 152 + return -EINVAL; 153 + } 154 + 155 + writel(FREEZE_CSR_CTRL_UNFREEZE_REQ, csr_ctrl_addr); 156 + 157 + ret = altera_freeze_br_req_ack(priv, timeout, 158 + FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE); 159 + 160 + status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET); 161 + 162 + dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr)); 163 + 164 + writel(0, csr_ctrl_addr); 165 + 166 + return ret; 167 + } 168 + 169 + /* 170 + * enable = 1 : allow traffic through the bridge 171 + * enable = 0 : disable traffic through the bridge 172 + */ 173 + static int altera_freeze_br_enable_set(struct fpga_bridge *bridge, 174 + bool enable) 175 + { 176 + struct altera_freeze_br_data *priv = bridge->priv; 177 + struct fpga_image_info *info = bridge->info; 178 + u32 timeout = 0; 179 + int ret; 180 + 181 + if (enable) { 182 + if (info) 183 + timeout = info->enable_timeout_us; 184 + 185 + ret = altera_freeze_br_do_unfreeze(bridge->priv, 
timeout); 186 + } else { 187 + if (info) 188 + timeout = info->disable_timeout_us; 189 + 190 + ret = altera_freeze_br_do_freeze(bridge->priv, timeout); 191 + } 192 + 193 + if (!ret) 194 + priv->enable = enable; 195 + 196 + return ret; 197 + } 198 + 199 + static int altera_freeze_br_enable_show(struct fpga_bridge *bridge) 200 + { 201 + struct altera_freeze_br_data *priv = bridge->priv; 202 + 203 + return priv->enable; 204 + } 205 + 206 + static struct fpga_bridge_ops altera_freeze_br_br_ops = { 207 + .enable_set = altera_freeze_br_enable_set, 208 + .enable_show = altera_freeze_br_enable_show, 209 + }; 210 + 211 + static const struct of_device_id altera_freeze_br_of_match[] = { 212 + { .compatible = "altr,freeze-bridge-controller", }, 213 + {}, 214 + }; 215 + MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match); 216 + 217 + static int altera_freeze_br_probe(struct platform_device *pdev) 218 + { 219 + struct device *dev = &pdev->dev; 220 + struct device_node *np = pdev->dev.of_node; 221 + struct altera_freeze_br_data *priv; 222 + struct resource *res; 223 + u32 status, revision; 224 + 225 + if (!np) 226 + return -ENODEV; 227 + 228 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 229 + if (!priv) 230 + return -ENOMEM; 231 + 232 + priv->dev = dev; 233 + 234 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 235 + priv->base_addr = devm_ioremap_resource(dev, res); 236 + if (IS_ERR(priv->base_addr)) 237 + return PTR_ERR(priv->base_addr); 238 + 239 + status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET); 240 + if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE) 241 + priv->enable = 1; 242 + 243 + revision = readl(priv->base_addr + FREEZE_CSR_REG_VERSION); 244 + if (revision != FREEZE_CSR_SUPPORTED_VERSION) 245 + dev_warn(dev, 246 + "%s Freeze Controller unexpected revision %d != %d\n", 247 + __func__, revision, FREEZE_CSR_SUPPORTED_VERSION); 248 + 249 + return fpga_bridge_register(dev, FREEZE_BRIDGE_NAME, 250 + &altera_freeze_br_br_ops, priv); 251 + } 252 + 253 
+ static int altera_freeze_br_remove(struct platform_device *pdev) 254 + { 255 + fpga_bridge_unregister(&pdev->dev); 256 + 257 + return 0; 258 + } 259 + 260 + static struct platform_driver altera_freeze_br_driver = { 261 + .probe = altera_freeze_br_probe, 262 + .remove = altera_freeze_br_remove, 263 + .driver = { 264 + .name = "altera_freeze_br", 265 + .of_match_table = of_match_ptr(altera_freeze_br_of_match), 266 + }, 267 + }; 268 + 269 + module_platform_driver(altera_freeze_br_driver); 270 + 271 + MODULE_DESCRIPTION("Altera Freeze Bridge"); 272 + MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>"); 273 + MODULE_LICENSE("GPL v2");
+222
drivers/fpga/altera-hps2fpga.c
··· 1 + /* 2 + * FPGA to/from HPS Bridge Driver for Altera SoCFPGA Devices 3 + * 4 + * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved. 5 + * 6 + * Includes this patch from the mailing list: 7 + * fpga: altera-hps2fpga: fix HPS2FPGA bridge visibility to L3 masters 8 + * Signed-off-by: Anatolij Gustschin <agust@denx.de> 9 + * 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms and conditions of the GNU General Public License, 12 + * version 2, as published by the Free Software Foundation. 13 + * 14 + * This program is distributed in the hope it will be useful, but WITHOUT 15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 17 + * more details. 18 + * 19 + * You should have received a copy of the GNU General Public License along with 20 + * this program. If not, see <http://www.gnu.org/licenses/>. 21 + */ 22 + 23 + /* 24 + * This driver manages bridges on a Altera SOCFPGA between the ARM host 25 + * processor system (HPS) and the embedded FPGA. 26 + * 27 + * This driver supports enabling and disabling of the configured ports, which 28 + * allows for safe reprogramming of the FPGA, assuming that the new FPGA image 29 + * uses the same port configuration. Bridges must be disabled before 30 + * reprogramming the FPGA and re-enabled after the FPGA has been programmed. 
31 + */ 32 + 33 + #include <linux/clk.h> 34 + #include <linux/fpga/fpga-bridge.h> 35 + #include <linux/kernel.h> 36 + #include <linux/mfd/syscon.h> 37 + #include <linux/module.h> 38 + #include <linux/of_platform.h> 39 + #include <linux/regmap.h> 40 + #include <linux/reset.h> 41 + #include <linux/spinlock.h> 42 + 43 + #define ALT_L3_REMAP_OFST 0x0 44 + #define ALT_L3_REMAP_MPUZERO_MSK 0x00000001 45 + #define ALT_L3_REMAP_H2F_MSK 0x00000008 46 + #define ALT_L3_REMAP_LWH2F_MSK 0x00000010 47 + 48 + #define HPS2FPGA_BRIDGE_NAME "hps2fpga" 49 + #define LWHPS2FPGA_BRIDGE_NAME "lwhps2fpga" 50 + #define FPGA2HPS_BRIDGE_NAME "fpga2hps" 51 + 52 + struct altera_hps2fpga_data { 53 + const char *name; 54 + struct reset_control *bridge_reset; 55 + struct regmap *l3reg; 56 + unsigned int remap_mask; 57 + struct clk *clk; 58 + }; 59 + 60 + static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge) 61 + { 62 + struct altera_hps2fpga_data *priv = bridge->priv; 63 + 64 + return reset_control_status(priv->bridge_reset); 65 + } 66 + 67 + /* The L3 REMAP register is write only, so keep a cached value. 
*/ 68 + static unsigned int l3_remap_shadow; 69 + static spinlock_t l3_remap_lock; 70 + 71 + static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv, 72 + bool enable) 73 + { 74 + unsigned long flags; 75 + int ret; 76 + 77 + /* bring bridge out of reset */ 78 + if (enable) 79 + ret = reset_control_deassert(priv->bridge_reset); 80 + else 81 + ret = reset_control_assert(priv->bridge_reset); 82 + if (ret) 83 + return ret; 84 + 85 + /* Allow bridge to be visible to L3 masters or not */ 86 + if (priv->remap_mask) { 87 + spin_lock_irqsave(&l3_remap_lock, flags); 88 + l3_remap_shadow |= ALT_L3_REMAP_MPUZERO_MSK; 89 + 90 + if (enable) 91 + l3_remap_shadow |= priv->remap_mask; 92 + else 93 + l3_remap_shadow &= ~priv->remap_mask; 94 + 95 + ret = regmap_write(priv->l3reg, ALT_L3_REMAP_OFST, 96 + l3_remap_shadow); 97 + spin_unlock_irqrestore(&l3_remap_lock, flags); 98 + } 99 + 100 + return ret; 101 + } 102 + 103 + static int alt_hps2fpga_enable_set(struct fpga_bridge *bridge, bool enable) 104 + { 105 + return _alt_hps2fpga_enable_set(bridge->priv, enable); 106 + } 107 + 108 + static const struct fpga_bridge_ops altera_hps2fpga_br_ops = { 109 + .enable_set = alt_hps2fpga_enable_set, 110 + .enable_show = alt_hps2fpga_enable_show, 111 + }; 112 + 113 + static struct altera_hps2fpga_data hps2fpga_data = { 114 + .name = HPS2FPGA_BRIDGE_NAME, 115 + .remap_mask = ALT_L3_REMAP_H2F_MSK, 116 + }; 117 + 118 + static struct altera_hps2fpga_data lwhps2fpga_data = { 119 + .name = LWHPS2FPGA_BRIDGE_NAME, 120 + .remap_mask = ALT_L3_REMAP_LWH2F_MSK, 121 + }; 122 + 123 + static struct altera_hps2fpga_data fpga2hps_data = { 124 + .name = FPGA2HPS_BRIDGE_NAME, 125 + }; 126 + 127 + static const struct of_device_id altera_fpga_of_match[] = { 128 + { .compatible = "altr,socfpga-hps2fpga-bridge", 129 + .data = &hps2fpga_data }, 130 + { .compatible = "altr,socfpga-lwhps2fpga-bridge", 131 + .data = &lwhps2fpga_data }, 132 + { .compatible = "altr,socfpga-fpga2hps-bridge", 133 + .data = 
&fpga2hps_data }, 134 + {}, 135 + }; 136 + 137 + static int alt_fpga_bridge_probe(struct platform_device *pdev) 138 + { 139 + struct device *dev = &pdev->dev; 140 + struct altera_hps2fpga_data *priv; 141 + const struct of_device_id *of_id; 142 + u32 enable; 143 + int ret; 144 + 145 + of_id = of_match_device(altera_fpga_of_match, dev); 146 + priv = (struct altera_hps2fpga_data *)of_id->data; 147 + 148 + priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0); 149 + if (IS_ERR(priv->bridge_reset)) { 150 + dev_err(dev, "Could not get %s reset control\n", priv->name); 151 + return PTR_ERR(priv->bridge_reset); 152 + } 153 + 154 + if (priv->remap_mask) { 155 + priv->l3reg = syscon_regmap_lookup_by_compatible("altr,l3regs"); 156 + if (IS_ERR(priv->l3reg)) { 157 + dev_err(dev, "regmap for altr,l3regs lookup failed\n"); 158 + return PTR_ERR(priv->l3reg); 159 + } 160 + } 161 + 162 + priv->clk = devm_clk_get(dev, NULL); 163 + if (IS_ERR(priv->clk)) { 164 + dev_err(dev, "no clock specified\n"); 165 + return PTR_ERR(priv->clk); 166 + } 167 + 168 + ret = clk_prepare_enable(priv->clk); 169 + if (ret) { 170 + dev_err(dev, "could not enable clock\n"); 171 + return -EBUSY; 172 + } 173 + 174 + spin_lock_init(&l3_remap_lock); 175 + 176 + if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) { 177 + if (enable > 1) { 178 + dev_warn(dev, "invalid bridge-enable %u > 1\n", enable); 179 + } else { 180 + dev_info(dev, "%s bridge\n", 181 + (enable ? 
"enabling" : "disabling")); 182 + 183 + ret = _alt_hps2fpga_enable_set(priv, enable); 184 + if (ret) { 185 + fpga_bridge_unregister(&pdev->dev); 186 + return ret; 187 + } 188 + } 189 + } 190 + 191 + return fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops, 192 + priv); 193 + } 194 + 195 + static int alt_fpga_bridge_remove(struct platform_device *pdev) 196 + { 197 + struct fpga_bridge *bridge = platform_get_drvdata(pdev); 198 + struct altera_hps2fpga_data *priv = bridge->priv; 199 + 200 + fpga_bridge_unregister(&pdev->dev); 201 + 202 + clk_disable_unprepare(priv->clk); 203 + 204 + return 0; 205 + } 206 + 207 + MODULE_DEVICE_TABLE(of, altera_fpga_of_match); 208 + 209 + static struct platform_driver alt_fpga_bridge_driver = { 210 + .probe = alt_fpga_bridge_probe, 211 + .remove = alt_fpga_bridge_remove, 212 + .driver = { 213 + .name = "altera_hps2fpga_bridge", 214 + .of_match_table = of_match_ptr(altera_fpga_of_match), 215 + }, 216 + }; 217 + 218 + module_platform_driver(alt_fpga_bridge_driver); 219 + 220 + MODULE_DESCRIPTION("Altera SoCFPGA HPS to FPGA Bridge"); 221 + MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>"); 222 + MODULE_LICENSE("GPL v2");
+395
drivers/fpga/fpga-bridge.c
··· 1 + /* 2 + * FPGA Bridge Framework Driver 3 + * 4 + * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include <linux/fpga/fpga-bridge.h> 19 + #include <linux/idr.h> 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/of_platform.h> 23 + #include <linux/slab.h> 24 + #include <linux/spinlock.h> 25 + 26 + static DEFINE_IDA(fpga_bridge_ida); 27 + static struct class *fpga_bridge_class; 28 + 29 + /* Lock for adding/removing bridges to linked lists*/ 30 + spinlock_t bridge_list_lock; 31 + 32 + static int fpga_bridge_of_node_match(struct device *dev, const void *data) 33 + { 34 + return dev->of_node == data; 35 + } 36 + 37 + /** 38 + * fpga_bridge_enable - Enable transactions on the bridge 39 + * 40 + * @bridge: FPGA bridge 41 + * 42 + * Return: 0 for success, error code otherwise. 43 + */ 44 + int fpga_bridge_enable(struct fpga_bridge *bridge) 45 + { 46 + dev_dbg(&bridge->dev, "enable\n"); 47 + 48 + if (bridge->br_ops && bridge->br_ops->enable_set) 49 + return bridge->br_ops->enable_set(bridge, 1); 50 + 51 + return 0; 52 + } 53 + EXPORT_SYMBOL_GPL(fpga_bridge_enable); 54 + 55 + /** 56 + * fpga_bridge_disable - Disable transactions on the bridge 57 + * 58 + * @bridge: FPGA bridge 59 + * 60 + * Return: 0 for success, error code otherwise. 
61 + */ 62 + int fpga_bridge_disable(struct fpga_bridge *bridge) 63 + { 64 + dev_dbg(&bridge->dev, "disable\n"); 65 + 66 + if (bridge->br_ops && bridge->br_ops->enable_set) 67 + return bridge->br_ops->enable_set(bridge, 0); 68 + 69 + return 0; 70 + } 71 + EXPORT_SYMBOL_GPL(fpga_bridge_disable); 72 + 73 + /** 74 + * of_fpga_bridge_get - get an exclusive reference to a fpga bridge 75 + * 76 + * @np: node pointer of a FPGA bridge 77 + * @info: fpga image specific information 78 + * 79 + * Return fpga_bridge struct if successful. 80 + * Return -EBUSY if someone already has a reference to the bridge. 81 + * Return -ENODEV if @np is not a FPGA Bridge. 82 + */ 83 + struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, 84 + struct fpga_image_info *info) 85 + 86 + { 87 + struct device *dev; 88 + struct fpga_bridge *bridge; 89 + int ret = -ENODEV; 90 + 91 + dev = class_find_device(fpga_bridge_class, NULL, np, 92 + fpga_bridge_of_node_match); 93 + if (!dev) 94 + goto err_dev; 95 + 96 + bridge = to_fpga_bridge(dev); 97 + if (!bridge) 98 + goto err_dev; 99 + 100 + bridge->info = info; 101 + 102 + if (!mutex_trylock(&bridge->mutex)) { 103 + ret = -EBUSY; 104 + goto err_dev; 105 + } 106 + 107 + if (!try_module_get(dev->parent->driver->owner)) 108 + goto err_ll_mod; 109 + 110 + dev_dbg(&bridge->dev, "get\n"); 111 + 112 + return bridge; 113 + 114 + err_ll_mod: 115 + mutex_unlock(&bridge->mutex); 116 + err_dev: 117 + put_device(dev); 118 + return ERR_PTR(ret); 119 + } 120 + EXPORT_SYMBOL_GPL(of_fpga_bridge_get); 121 + 122 + /** 123 + * fpga_bridge_put - release a reference to a bridge 124 + * 125 + * @bridge: FPGA bridge 126 + */ 127 + void fpga_bridge_put(struct fpga_bridge *bridge) 128 + { 129 + dev_dbg(&bridge->dev, "put\n"); 130 + 131 + bridge->info = NULL; 132 + module_put(bridge->dev.parent->driver->owner); 133 + mutex_unlock(&bridge->mutex); 134 + put_device(&bridge->dev); 135 + } 136 + EXPORT_SYMBOL_GPL(fpga_bridge_put); 137 + 138 + /** 139 + * fpga_bridges_enable - 
enable bridges in a list 140 + * @bridge_list: list of FPGA bridges 141 + * 142 + * Enable each bridge in the list. If list is empty, do nothing. 143 + * 144 + * Return 0 for success or empty bridge list; return error code otherwise. 145 + */ 146 + int fpga_bridges_enable(struct list_head *bridge_list) 147 + { 148 + struct fpga_bridge *bridge; 149 + struct list_head *node; 150 + int ret; 151 + 152 + list_for_each(node, bridge_list) { 153 + bridge = list_entry(node, struct fpga_bridge, node); 154 + ret = fpga_bridge_enable(bridge); 155 + if (ret) 156 + return ret; 157 + } 158 + 159 + return 0; 160 + } 161 + EXPORT_SYMBOL_GPL(fpga_bridges_enable); 162 + 163 + /** 164 + * fpga_bridges_disable - disable bridges in a list 165 + * 166 + * @bridge_list: list of FPGA bridges 167 + * 168 + * Disable each bridge in the list. If list is empty, do nothing. 169 + * 170 + * Return 0 for success or empty bridge list; return error code otherwise. 171 + */ 172 + int fpga_bridges_disable(struct list_head *bridge_list) 173 + { 174 + struct fpga_bridge *bridge; 175 + struct list_head *node; 176 + int ret; 177 + 178 + list_for_each(node, bridge_list) { 179 + bridge = list_entry(node, struct fpga_bridge, node); 180 + ret = fpga_bridge_disable(bridge); 181 + if (ret) 182 + return ret; 183 + } 184 + 185 + return 0; 186 + } 187 + EXPORT_SYMBOL_GPL(fpga_bridges_disable); 188 + 189 + /** 190 + * fpga_bridges_put - put bridges 191 + * 192 + * @bridge_list: list of FPGA bridges 193 + * 194 + * For each bridge in the list, put the bridge and remove it from the list. 195 + * If list is empty, do nothing. 
196 + */ 197 + void fpga_bridges_put(struct list_head *bridge_list) 198 + { 199 + struct fpga_bridge *bridge; 200 + struct list_head *node, *next; 201 + unsigned long flags; 202 + 203 + list_for_each_safe(node, next, bridge_list) { 204 + bridge = list_entry(node, struct fpga_bridge, node); 205 + 206 + fpga_bridge_put(bridge); 207 + 208 + spin_lock_irqsave(&bridge_list_lock, flags); 209 + list_del(&bridge->node); 210 + spin_unlock_irqrestore(&bridge_list_lock, flags); 211 + } 212 + } 213 + EXPORT_SYMBOL_GPL(fpga_bridges_put); 214 + 215 + /** 216 + * fpga_bridges_get_to_list - get a bridge, add it to a list 217 + * 218 + * @np: node pointer of a FPGA bridge 219 + * @info: fpga image specific information 220 + * @bridge_list: list of FPGA bridges 221 + * 222 + * Get an exclusive reference to the bridge and and it to the list. 223 + * 224 + * Return 0 for success, error code from of_fpga_bridge_get() othewise. 225 + */ 226 + int fpga_bridge_get_to_list(struct device_node *np, 227 + struct fpga_image_info *info, 228 + struct list_head *bridge_list) 229 + { 230 + struct fpga_bridge *bridge; 231 + unsigned long flags; 232 + 233 + bridge = of_fpga_bridge_get(np, info); 234 + if (IS_ERR(bridge)) 235 + return PTR_ERR(bridge); 236 + 237 + spin_lock_irqsave(&bridge_list_lock, flags); 238 + list_add(&bridge->node, bridge_list); 239 + spin_unlock_irqrestore(&bridge_list_lock, flags); 240 + 241 + return 0; 242 + } 243 + EXPORT_SYMBOL_GPL(fpga_bridge_get_to_list); 244 + 245 + static ssize_t name_show(struct device *dev, 246 + struct device_attribute *attr, char *buf) 247 + { 248 + struct fpga_bridge *bridge = to_fpga_bridge(dev); 249 + 250 + return sprintf(buf, "%s\n", bridge->name); 251 + } 252 + 253 + static ssize_t state_show(struct device *dev, 254 + struct device_attribute *attr, char *buf) 255 + { 256 + struct fpga_bridge *bridge = to_fpga_bridge(dev); 257 + int enable = 1; 258 + 259 + if (bridge->br_ops && bridge->br_ops->enable_show) 260 + enable = 
bridge->br_ops->enable_show(bridge); 261 + 262 + return sprintf(buf, "%s\n", enable ? "enabled" : "disabled"); 263 + } 264 + 265 + static DEVICE_ATTR_RO(name); 266 + static DEVICE_ATTR_RO(state); 267 + 268 + static struct attribute *fpga_bridge_attrs[] = { 269 + &dev_attr_name.attr, 270 + &dev_attr_state.attr, 271 + NULL, 272 + }; 273 + ATTRIBUTE_GROUPS(fpga_bridge); 274 + 275 + /** 276 + * fpga_bridge_register - register a fpga bridge driver 277 + * @dev: FPGA bridge device from pdev 278 + * @name: FPGA bridge name 279 + * @br_ops: pointer to structure of fpga bridge ops 280 + * @priv: FPGA bridge private data 281 + * 282 + * Return: 0 for success, error code otherwise. 283 + */ 284 + int fpga_bridge_register(struct device *dev, const char *name, 285 + const struct fpga_bridge_ops *br_ops, void *priv) 286 + { 287 + struct fpga_bridge *bridge; 288 + int id, ret = 0; 289 + 290 + if (!name || !strlen(name)) { 291 + dev_err(dev, "Attempt to register with no name!\n"); 292 + return -EINVAL; 293 + } 294 + 295 + bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); 296 + if (!bridge) 297 + return -ENOMEM; 298 + 299 + id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL); 300 + if (id < 0) { 301 + ret = id; 302 + goto error_kfree; 303 + } 304 + 305 + mutex_init(&bridge->mutex); 306 + INIT_LIST_HEAD(&bridge->node); 307 + 308 + bridge->name = name; 309 + bridge->br_ops = br_ops; 310 + bridge->priv = priv; 311 + 312 + device_initialize(&bridge->dev); 313 + bridge->dev.class = fpga_bridge_class; 314 + bridge->dev.parent = dev; 315 + bridge->dev.of_node = dev->of_node; 316 + bridge->dev.id = id; 317 + dev_set_drvdata(dev, bridge); 318 + 319 + ret = dev_set_name(&bridge->dev, "br%d", id); 320 + if (ret) 321 + goto error_device; 322 + 323 + ret = device_add(&bridge->dev); 324 + if (ret) 325 + goto error_device; 326 + 327 + of_platform_populate(dev->of_node, NULL, NULL, dev); 328 + 329 + dev_info(bridge->dev.parent, "fpga bridge [%s] registered\n", 330 + bridge->name); 331 + 332 + 
return 0; 333 + 334 + error_device: 335 + ida_simple_remove(&fpga_bridge_ida, id); 336 + error_kfree: 337 + kfree(bridge); 338 + 339 + return ret; 340 + } 341 + EXPORT_SYMBOL_GPL(fpga_bridge_register); 342 + 343 + /** 344 + * fpga_bridge_unregister - unregister a fpga bridge driver 345 + * @dev: FPGA bridge device from pdev 346 + */ 347 + void fpga_bridge_unregister(struct device *dev) 348 + { 349 + struct fpga_bridge *bridge = dev_get_drvdata(dev); 350 + 351 + /* 352 + * If the low level driver provides a method for putting bridge into 353 + * a desired state upon unregister, do it. 354 + */ 355 + if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove) 356 + bridge->br_ops->fpga_bridge_remove(bridge); 357 + 358 + device_unregister(&bridge->dev); 359 + } 360 + EXPORT_SYMBOL_GPL(fpga_bridge_unregister); 361 + 362 + static void fpga_bridge_dev_release(struct device *dev) 363 + { 364 + struct fpga_bridge *bridge = to_fpga_bridge(dev); 365 + 366 + ida_simple_remove(&fpga_bridge_ida, bridge->dev.id); 367 + kfree(bridge); 368 + } 369 + 370 + static int __init fpga_bridge_dev_init(void) 371 + { 372 + spin_lock_init(&bridge_list_lock); 373 + 374 + fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge"); 375 + if (IS_ERR(fpga_bridge_class)) 376 + return PTR_ERR(fpga_bridge_class); 377 + 378 + fpga_bridge_class->dev_groups = fpga_bridge_groups; 379 + fpga_bridge_class->dev_release = fpga_bridge_dev_release; 380 + 381 + return 0; 382 + } 383 + 384 + static void __exit fpga_bridge_dev_exit(void) 385 + { 386 + class_destroy(fpga_bridge_class); 387 + ida_destroy(&fpga_bridge_ida); 388 + } 389 + 390 + MODULE_DESCRIPTION("FPGA Bridge Driver"); 391 + MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>"); 392 + MODULE_LICENSE("GPL v2"); 393 + 394 + subsys_initcall(fpga_bridge_dev_init); 395 + module_exit(fpga_bridge_dev_exit);
+66 -31
drivers/fpga/fpga-mgr.c
··· 32 32 /** 33 33 * fpga_mgr_buf_load - load fpga from image in buffer 34 34 * @mgr: fpga manager 35 - * @flags: flags setting fpga confuration modes 35 + * @info: fpga image specific information 36 36 * @buf: buffer contain fpga image 37 37 * @count: byte count of buf 38 38 * 39 39 * Step the low level fpga manager through the device-specific steps of getting 40 40 * an FPGA ready to be configured, writing the image to it, then doing whatever 41 41 * post-configuration steps necessary. This code assumes the caller got the 42 - * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code. 42 + * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is 43 + * not an error code. 43 44 * 44 45 * Return: 0 on success, negative error code otherwise. 45 46 */ 46 - int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf, 47 - size_t count) 47 + int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, 48 + const char *buf, size_t count) 48 49 { 49 50 struct device *dev = &mgr->dev; 50 51 int ret; ··· 53 52 /* 54 53 * Call the low level driver's write_init function. This will do the 55 54 * device-specific things to get the FPGA into the state where it is 56 - * ready to receive an FPGA image. 55 + * ready to receive an FPGA image. The low level driver only gets to 56 + * see the first initial_header_size bytes in the buffer. 57 57 */ 58 58 mgr->state = FPGA_MGR_STATE_WRITE_INIT; 59 - ret = mgr->mops->write_init(mgr, flags, buf, count); 59 + ret = mgr->mops->write_init(mgr, info, buf, 60 + min(mgr->mops->initial_header_size, count)); 60 61 if (ret) { 61 62 dev_err(dev, "Error preparing FPGA for writing\n"); 62 63 mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR; ··· 81 78 * steps to finish and set the FPGA into operating mode. 
82 79 */ 83 80 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE; 84 - ret = mgr->mops->write_complete(mgr, flags); 81 + ret = mgr->mops->write_complete(mgr, info); 85 82 if (ret) { 86 83 dev_err(dev, "Error after writing image data to FPGA\n"); 87 84 mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR; ··· 96 93 /** 97 94 * fpga_mgr_firmware_load - request firmware and load to fpga 98 95 * @mgr: fpga manager 99 - * @flags: flags setting fpga confuration modes 96 + * @info: fpga image specific information 100 97 * @image_name: name of image file on the firmware search path 101 98 * 102 99 * Request an FPGA image using the firmware class, then write out to the FPGA. 103 100 * Update the state before each step to provide info on what step failed if 104 101 * there is a failure. This code assumes the caller got the mgr pointer 105 - * from of_fpga_mgr_get() and checked that it is not an error code. 102 + * from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is not an error 103 + * code. 106 104 * 107 105 * Return: 0 on success, negative error code otherwise. 108 106 */ 109 - int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, 107 + int fpga_mgr_firmware_load(struct fpga_manager *mgr, 108 + struct fpga_image_info *info, 110 109 const char *image_name) 111 110 { 112 111 struct device *dev = &mgr->dev; ··· 126 121 return ret; 127 122 } 128 123 129 - ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size); 124 + ret = fpga_mgr_buf_load(mgr, info, fw->data, fw->size); 130 125 131 126 release_firmware(fw); 132 127 ··· 186 181 }; 187 182 ATTRIBUTE_GROUPS(fpga_mgr); 188 183 189 - static int fpga_mgr_of_node_match(struct device *dev, const void *data) 190 - { 191 - return dev->of_node == data; 192 - } 193 - 194 - /** 195 - * of_fpga_mgr_get - get an exclusive reference to a fpga mgr 196 - * @node: device node 197 - * 198 - * Given a device node, get an exclusive reference to a fpga mgr. 
199 - * 200 - * Return: fpga manager struct or IS_ERR() condition containing error code. 201 - */ 202 - struct fpga_manager *of_fpga_mgr_get(struct device_node *node) 184 + struct fpga_manager *__fpga_mgr_get(struct device *dev) 203 185 { 204 186 struct fpga_manager *mgr; 205 - struct device *dev; 206 187 int ret = -ENODEV; 207 - 208 - dev = class_find_device(fpga_mgr_class, NULL, node, 209 - fpga_mgr_of_node_match); 210 - if (!dev) 211 - return ERR_PTR(-ENODEV); 212 188 213 189 mgr = to_fpga_manager(dev); 214 190 if (!mgr) ··· 211 225 err_dev: 212 226 put_device(dev); 213 227 return ERR_PTR(ret); 228 + } 229 + 230 + static int fpga_mgr_dev_match(struct device *dev, const void *data) 231 + { 232 + return dev->parent == data; 233 + } 234 + 235 + /** 236 + * fpga_mgr_get - get an exclusive reference to a fpga mgr 237 + * @dev: parent device that fpga mgr was registered with 238 + * 239 + * Given a device, get an exclusive reference to a fpga mgr. 240 + * 241 + * Return: fpga manager struct or IS_ERR() condition containing error code. 242 + */ 243 + struct fpga_manager *fpga_mgr_get(struct device *dev) 244 + { 245 + struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev, 246 + fpga_mgr_dev_match); 247 + if (!mgr_dev) 248 + return ERR_PTR(-ENODEV); 249 + 250 + return __fpga_mgr_get(mgr_dev); 251 + } 252 + EXPORT_SYMBOL_GPL(fpga_mgr_get); 253 + 254 + static int fpga_mgr_of_node_match(struct device *dev, const void *data) 255 + { 256 + return dev->of_node == data; 257 + } 258 + 259 + /** 260 + * of_fpga_mgr_get - get an exclusive reference to a fpga mgr 261 + * @node: device node 262 + * 263 + * Given a device node, get an exclusive reference to a fpga mgr. 264 + * 265 + * Return: fpga manager struct or IS_ERR() condition containing error code. 
266 + */ 267 + struct fpga_manager *of_fpga_mgr_get(struct device_node *node) 268 + { 269 + struct device *dev; 270 + 271 + dev = class_find_device(fpga_mgr_class, NULL, node, 272 + fpga_mgr_of_node_match); 273 + if (!dev) 274 + return ERR_PTR(-ENODEV); 275 + 276 + return __fpga_mgr_get(dev); 214 277 } 215 278 EXPORT_SYMBOL_GPL(of_fpga_mgr_get); 216 279
+603
drivers/fpga/fpga-region.c
··· 1 + /* 2 + * FPGA Region - Device Tree support for FPGA programming under Linux 3 + * 4 + * Copyright (C) 2013-2016 Altera Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include <linux/fpga/fpga-bridge.h> 20 + #include <linux/fpga/fpga-mgr.h> 21 + #include <linux/idr.h> 22 + #include <linux/kernel.h> 23 + #include <linux/list.h> 24 + #include <linux/module.h> 25 + #include <linux/of_platform.h> 26 + #include <linux/slab.h> 27 + #include <linux/spinlock.h> 28 + 29 + /** 30 + * struct fpga_region - FPGA Region structure 31 + * @dev: FPGA Region device 32 + * @mutex: enforces exclusive reference to region 33 + * @bridge_list: list of FPGA bridges specified in region 34 + * @info: fpga image specific information 35 + */ 36 + struct fpga_region { 37 + struct device dev; 38 + struct mutex mutex; /* for exclusive reference to region */ 39 + struct list_head bridge_list; 40 + struct fpga_image_info *info; 41 + }; 42 + 43 + #define to_fpga_region(d) container_of(d, struct fpga_region, dev) 44 + 45 + static DEFINE_IDA(fpga_region_ida); 46 + static struct class *fpga_region_class; 47 + 48 + static const struct of_device_id fpga_region_of_match[] = { 49 + { .compatible = "fpga-region", }, 50 + {}, 51 + }; 52 + MODULE_DEVICE_TABLE(of, fpga_region_of_match); 53 + 54 + static int fpga_region_of_node_match(struct device *dev, const void *data) 55 + { 56 + return 
dev->of_node == data; 57 + } 58 + 59 + /** 60 + * fpga_region_find - find FPGA region 61 + * @np: device node of FPGA Region 62 + * Caller will need to put_device(&region->dev) when done. 63 + * Returns FPGA Region struct or NULL 64 + */ 65 + static struct fpga_region *fpga_region_find(struct device_node *np) 66 + { 67 + struct device *dev; 68 + 69 + dev = class_find_device(fpga_region_class, NULL, np, 70 + fpga_region_of_node_match); 71 + if (!dev) 72 + return NULL; 73 + 74 + return to_fpga_region(dev); 75 + } 76 + 77 + /** 78 + * fpga_region_get - get an exclusive reference to a fpga region 79 + * @region: FPGA Region struct 80 + * 81 + * Caller should call fpga_region_put() when done with region. 82 + * 83 + * Return fpga_region struct if successful. 84 + * Return -EBUSY if someone already has a reference to the region. 85 + * Return -ENODEV if @np is not a FPGA Region. 86 + */ 87 + static struct fpga_region *fpga_region_get(struct fpga_region *region) 88 + { 89 + struct device *dev = &region->dev; 90 + 91 + if (!mutex_trylock(&region->mutex)) { 92 + dev_dbg(dev, "%s: FPGA Region already in use\n", __func__); 93 + return ERR_PTR(-EBUSY); 94 + } 95 + 96 + get_device(dev); 97 + of_node_get(dev->of_node); 98 + if (!try_module_get(dev->parent->driver->owner)) { 99 + of_node_put(dev->of_node); 100 + put_device(dev); 101 + mutex_unlock(&region->mutex); 102 + return ERR_PTR(-ENODEV); 103 + } 104 + 105 + dev_dbg(&region->dev, "get\n"); 106 + 107 + return region; 108 + } 109 + 110 + /** 111 + * fpga_region_put - release a reference to a region 112 + * 113 + * @region: FPGA region 114 + */ 115 + static void fpga_region_put(struct fpga_region *region) 116 + { 117 + struct device *dev = &region->dev; 118 + 119 + dev_dbg(&region->dev, "put\n"); 120 + 121 + module_put(dev->parent->driver->owner); 122 + of_node_put(dev->of_node); 123 + put_device(dev); 124 + mutex_unlock(&region->mutex); 125 + } 126 + 127 + /** 128 + * fpga_region_get_manager - get exclusive reference for FPGA 
manager 129 + * @region: FPGA region 130 + * 131 + * Get FPGA Manager from "fpga-mgr" property or from ancestor region. 132 + * 133 + * Caller should call fpga_mgr_put() when done with manager. 134 + * 135 + * Return: fpga manager struct or IS_ERR() condition containing error code. 136 + */ 137 + static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region) 138 + { 139 + struct device *dev = &region->dev; 140 + struct device_node *np = dev->of_node; 141 + struct device_node *mgr_node; 142 + struct fpga_manager *mgr; 143 + 144 + of_node_get(np); 145 + while (np) { 146 + if (of_device_is_compatible(np, "fpga-region")) { 147 + mgr_node = of_parse_phandle(np, "fpga-mgr", 0); 148 + if (mgr_node) { 149 + mgr = of_fpga_mgr_get(mgr_node); 150 + of_node_put(np); 151 + return mgr; 152 + } 153 + } 154 + np = of_get_next_parent(np); 155 + } 156 + of_node_put(np); 157 + 158 + return ERR_PTR(-EINVAL); 159 + } 160 + 161 + /** 162 + * fpga_region_get_bridges - create a list of bridges 163 + * @region: FPGA region 164 + * @overlay: device node of the overlay 165 + * 166 + * Create a list of bridges including the parent bridge and the bridges 167 + * specified by "fpga-bridges" property. Note that the 168 + * fpga_bridges_enable/disable/put functions are all fine with an empty list 169 + * if that happens. 170 + * 171 + * Caller should call fpga_bridges_put(&region->bridge_list) when 172 + * done with the bridges. 173 + * 174 + * Return 0 for success (even if there are no bridges specified) 175 + * or -EBUSY if any of the bridges are in use. 
176 + */ 177 + static int fpga_region_get_bridges(struct fpga_region *region, 178 + struct device_node *overlay) 179 + { 180 + struct device *dev = &region->dev; 181 + struct device_node *region_np = dev->of_node; 182 + struct device_node *br, *np, *parent_br = NULL; 183 + int i, ret; 184 + 185 + /* If parent is a bridge, add to list */ 186 + ret = fpga_bridge_get_to_list(region_np->parent, region->info, 187 + &region->bridge_list); 188 + if (ret == -EBUSY) 189 + return ret; 190 + 191 + if (!ret) 192 + parent_br = region_np->parent; 193 + 194 + /* If overlay has a list of bridges, use it. */ 195 + if (of_parse_phandle(overlay, "fpga-bridges", 0)) 196 + np = overlay; 197 + else 198 + np = region_np; 199 + 200 + for (i = 0; ; i++) { 201 + br = of_parse_phandle(np, "fpga-bridges", i); 202 + if (!br) 203 + break; 204 + 205 + /* If parent bridge is in list, skip it. */ 206 + if (br == parent_br) 207 + continue; 208 + 209 + /* If node is a bridge, get it and add to list */ 210 + ret = fpga_bridge_get_to_list(br, region->info, 211 + &region->bridge_list); 212 + 213 + /* If any of the bridges are in use, give up */ 214 + if (ret == -EBUSY) { 215 + fpga_bridges_put(&region->bridge_list); 216 + return -EBUSY; 217 + } 218 + } 219 + 220 + return 0; 221 + } 222 + 223 + /** 224 + * fpga_region_program_fpga - program FPGA 225 + * @region: FPGA region 226 + * @firmware_name: name of FPGA image firmware file 227 + * @overlay: device node of the overlay 228 + * Program an FPGA using information in the device tree. 229 + * Function assumes that there is a firmware-name property. 230 + * Return 0 for success or negative error code. 
231 + */ 232 + static int fpga_region_program_fpga(struct fpga_region *region, 233 + const char *firmware_name, 234 + struct device_node *overlay) 235 + { 236 + struct fpga_manager *mgr; 237 + int ret; 238 + 239 + region = fpga_region_get(region); 240 + if (IS_ERR(region)) { 241 + pr_err("failed to get fpga region\n"); 242 + return PTR_ERR(region); 243 + } 244 + 245 + mgr = fpga_region_get_manager(region); 246 + if (IS_ERR(mgr)) { 247 + pr_err("failed to get fpga region manager\n"); 248 + return PTR_ERR(mgr); 249 + } 250 + 251 + ret = fpga_region_get_bridges(region, overlay); 252 + if (ret) { 253 + pr_err("failed to get fpga region bridges\n"); 254 + goto err_put_mgr; 255 + } 256 + 257 + ret = fpga_bridges_disable(&region->bridge_list); 258 + if (ret) { 259 + pr_err("failed to disable region bridges\n"); 260 + goto err_put_br; 261 + } 262 + 263 + ret = fpga_mgr_firmware_load(mgr, region->info, firmware_name); 264 + if (ret) { 265 + pr_err("failed to load fpga image\n"); 266 + goto err_put_br; 267 + } 268 + 269 + ret = fpga_bridges_enable(&region->bridge_list); 270 + if (ret) { 271 + pr_err("failed to enable region bridges\n"); 272 + goto err_put_br; 273 + } 274 + 275 + fpga_mgr_put(mgr); 276 + fpga_region_put(region); 277 + 278 + return 0; 279 + 280 + err_put_br: 281 + fpga_bridges_put(&region->bridge_list); 282 + err_put_mgr: 283 + fpga_mgr_put(mgr); 284 + fpga_region_put(region); 285 + 286 + return ret; 287 + } 288 + 289 + /** 290 + * child_regions_with_firmware 291 + * @overlay: device node of the overlay 292 + * 293 + * If the overlay adds child FPGA regions, they are not allowed to have 294 + * firmware-name property. 295 + * 296 + * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name. 
297 + */ 298 + static int child_regions_with_firmware(struct device_node *overlay) 299 + { 300 + struct device_node *child_region; 301 + const char *child_firmware_name; 302 + int ret = 0; 303 + 304 + of_node_get(overlay); 305 + 306 + child_region = of_find_matching_node(overlay, fpga_region_of_match); 307 + while (child_region) { 308 + if (!of_property_read_string(child_region, "firmware-name", 309 + &child_firmware_name)) { 310 + ret = -EINVAL; 311 + break; 312 + } 313 + child_region = of_find_matching_node(child_region, 314 + fpga_region_of_match); 315 + } 316 + 317 + of_node_put(child_region); 318 + 319 + if (ret) 320 + pr_err("firmware-name not allowed in child FPGA region: %s", 321 + child_region->full_name); 322 + 323 + return ret; 324 + } 325 + 326 + /** 327 + * fpga_region_notify_pre_apply - pre-apply overlay notification 328 + * 329 + * @region: FPGA region that the overlay was applied to 330 + * @nd: overlay notification data 331 + * 332 + * Called after when an overlay targeted to a FPGA Region is about to be 333 + * applied. Function will check the properties that will be added to the FPGA 334 + * region. If the checks pass, it will program the FPGA. 335 + * 336 + * The checks are: 337 + * The overlay must add either firmware-name or external-fpga-config property 338 + * to the FPGA Region. 339 + * 340 + * firmware-name : program the FPGA 341 + * external-fpga-config : FPGA is already programmed 342 + * 343 + * The overlay can add other FPGA regions, but child FPGA regions cannot have a 344 + * firmware-name property since those regions don't exist yet. 345 + * 346 + * If the overlay that breaks the rules, notifier returns an error and the 347 + * overlay is rejected before it goes into the main tree. 348 + * 349 + * Returns 0 for success or negative error code for failure. 
350 + */ 351 + static int fpga_region_notify_pre_apply(struct fpga_region *region, 352 + struct of_overlay_notify_data *nd) 353 + { 354 + const char *firmware_name = NULL; 355 + struct fpga_image_info *info; 356 + int ret; 357 + 358 + info = devm_kzalloc(&region->dev, sizeof(*info), GFP_KERNEL); 359 + if (!info) 360 + return -ENOMEM; 361 + 362 + region->info = info; 363 + 364 + /* Reject overlay if child FPGA Regions have firmware-name property */ 365 + ret = child_regions_with_firmware(nd->overlay); 366 + if (ret) 367 + return ret; 368 + 369 + /* Read FPGA region properties from the overlay */ 370 + if (of_property_read_bool(nd->overlay, "partial-fpga-config")) 371 + info->flags |= FPGA_MGR_PARTIAL_RECONFIG; 372 + 373 + if (of_property_read_bool(nd->overlay, "external-fpga-config")) 374 + info->flags |= FPGA_MGR_EXTERNAL_CONFIG; 375 + 376 + of_property_read_string(nd->overlay, "firmware-name", &firmware_name); 377 + 378 + of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us", 379 + &info->enable_timeout_us); 380 + 381 + of_property_read_u32(nd->overlay, "region-freeze-timeout-us", 382 + &info->disable_timeout_us); 383 + 384 + /* If FPGA was externally programmed, don't specify firmware */ 385 + if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) { 386 + pr_err("error: specified firmware and external-fpga-config"); 387 + return -EINVAL; 388 + } 389 + 390 + /* FPGA is already configured externally. We're done. 
*/ 391 + if (info->flags & FPGA_MGR_EXTERNAL_CONFIG) 392 + return 0; 393 + 394 + /* If we got this far, we should be programming the FPGA */ 395 + if (!firmware_name) { 396 + pr_err("should specify firmware-name or external-fpga-config\n"); 397 + return -EINVAL; 398 + } 399 + 400 + return fpga_region_program_fpga(region, firmware_name, nd->overlay); 401 + } 402 + 403 + /** 404 + * fpga_region_notify_post_remove - post-remove overlay notification 405 + * 406 + * @region: FPGA region that was targeted by the overlay that was removed 407 + * @nd: overlay notification data 408 + * 409 + * Called after an overlay has been removed if the overlay's target was a 410 + * FPGA region. 411 + */ 412 + static void fpga_region_notify_post_remove(struct fpga_region *region, 413 + struct of_overlay_notify_data *nd) 414 + { 415 + fpga_bridges_disable(&region->bridge_list); 416 + fpga_bridges_put(&region->bridge_list); 417 + devm_kfree(&region->dev, region->info); 418 + region->info = NULL; 419 + } 420 + 421 + /** 422 + * of_fpga_region_notify - reconfig notifier for dynamic DT changes 423 + * @nb: notifier block 424 + * @action: notifier action 425 + * @arg: reconfig data 426 + * 427 + * This notifier handles programming a FPGA when a "firmware-name" property is 428 + * added to a fpga-region. 429 + * 430 + * Returns NOTIFY_OK or error if FPGA programming fails. 
431 + */ 432 + static int of_fpga_region_notify(struct notifier_block *nb, 433 + unsigned long action, void *arg) 434 + { 435 + struct of_overlay_notify_data *nd = arg; 436 + struct fpga_region *region; 437 + int ret; 438 + 439 + switch (action) { 440 + case OF_OVERLAY_PRE_APPLY: 441 + pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__); 442 + break; 443 + case OF_OVERLAY_POST_APPLY: 444 + pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__); 445 + return NOTIFY_OK; /* not for us */ 446 + case OF_OVERLAY_PRE_REMOVE: 447 + pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__); 448 + return NOTIFY_OK; /* not for us */ 449 + case OF_OVERLAY_POST_REMOVE: 450 + pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__); 451 + break; 452 + default: /* should not happen */ 453 + return NOTIFY_OK; 454 + } 455 + 456 + region = fpga_region_find(nd->target); 457 + if (!region) 458 + return NOTIFY_OK; 459 + 460 + ret = 0; 461 + switch (action) { 462 + case OF_OVERLAY_PRE_APPLY: 463 + ret = fpga_region_notify_pre_apply(region, nd); 464 + break; 465 + 466 + case OF_OVERLAY_POST_REMOVE: 467 + fpga_region_notify_post_remove(region, nd); 468 + break; 469 + } 470 + 471 + put_device(&region->dev); 472 + 473 + if (ret) 474 + return notifier_from_errno(ret); 475 + 476 + return NOTIFY_OK; 477 + } 478 + 479 + static struct notifier_block fpga_region_of_nb = { 480 + .notifier_call = of_fpga_region_notify, 481 + }; 482 + 483 + static int fpga_region_probe(struct platform_device *pdev) 484 + { 485 + struct device *dev = &pdev->dev; 486 + struct device_node *np = dev->of_node; 487 + struct fpga_region *region; 488 + int id, ret = 0; 489 + 490 + region = kzalloc(sizeof(*region), GFP_KERNEL); 491 + if (!region) 492 + return -ENOMEM; 493 + 494 + id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL); 495 + if (id < 0) { 496 + ret = id; 497 + goto err_kfree; 498 + } 499 + 500 + mutex_init(&region->mutex); 501 + INIT_LIST_HEAD(&region->bridge_list); 502 + 503 + device_initialize(&region->dev); 504 + region->dev.class 
= fpga_region_class; 505 + region->dev.parent = dev; 506 + region->dev.of_node = np; 507 + region->dev.id = id; 508 + dev_set_drvdata(dev, region); 509 + 510 + ret = dev_set_name(&region->dev, "region%d", id); 511 + if (ret) 512 + goto err_remove; 513 + 514 + ret = device_add(&region->dev); 515 + if (ret) 516 + goto err_remove; 517 + 518 + of_platform_populate(np, fpga_region_of_match, NULL, &region->dev); 519 + 520 + dev_info(dev, "FPGA Region probed\n"); 521 + 522 + return 0; 523 + 524 + err_remove: 525 + ida_simple_remove(&fpga_region_ida, id); 526 + err_kfree: 527 + kfree(region); 528 + 529 + return ret; 530 + } 531 + 532 + static int fpga_region_remove(struct platform_device *pdev) 533 + { 534 + struct fpga_region *region = platform_get_drvdata(pdev); 535 + 536 + device_unregister(&region->dev); 537 + 538 + return 0; 539 + } 540 + 541 + static struct platform_driver fpga_region_driver = { 542 + .probe = fpga_region_probe, 543 + .remove = fpga_region_remove, 544 + .driver = { 545 + .name = "fpga-region", 546 + .of_match_table = of_match_ptr(fpga_region_of_match), 547 + }, 548 + }; 549 + 550 + static void fpga_region_dev_release(struct device *dev) 551 + { 552 + struct fpga_region *region = to_fpga_region(dev); 553 + 554 + ida_simple_remove(&fpga_region_ida, region->dev.id); 555 + kfree(region); 556 + } 557 + 558 + /** 559 + * fpga_region_init - init function for fpga_region class 560 + * Creates the fpga_region class and registers a reconfig notifier. 
561 + */ 562 + static int __init fpga_region_init(void) 563 + { 564 + int ret; 565 + 566 + fpga_region_class = class_create(THIS_MODULE, "fpga_region"); 567 + if (IS_ERR(fpga_region_class)) 568 + return PTR_ERR(fpga_region_class); 569 + 570 + fpga_region_class->dev_release = fpga_region_dev_release; 571 + 572 + ret = of_overlay_notifier_register(&fpga_region_of_nb); 573 + if (ret) 574 + goto err_class; 575 + 576 + ret = platform_driver_register(&fpga_region_driver); 577 + if (ret) 578 + goto err_plat; 579 + 580 + return 0; 581 + 582 + err_plat: 583 + of_overlay_notifier_unregister(&fpga_region_of_nb); 584 + err_class: 585 + class_destroy(fpga_region_class); 586 + ida_destroy(&fpga_region_ida); 587 + return ret; 588 + } 589 + 590 + static void __exit fpga_region_exit(void) 591 + { 592 + platform_driver_unregister(&fpga_region_driver); 593 + of_overlay_notifier_unregister(&fpga_region_of_nb); 594 + class_destroy(fpga_region_class); 595 + ida_destroy(&fpga_region_ida); 596 + } 597 + 598 + subsys_initcall(fpga_region_init); 599 + module_exit(fpga_region_exit); 600 + 601 + MODULE_DESCRIPTION("FPGA Region"); 602 + MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>"); 603 + MODULE_LICENSE("GPL v2");
+557
drivers/fpga/socfpga-a10.c
··· 1 + /* 2 + * FPGA Manager Driver for Altera Arria10 SoCFPGA 3 + * 4 + * Copyright (C) 2015-2016 Altera Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along with 16 + * this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include <linux/clk.h> 20 + #include <linux/device.h> 21 + #include <linux/delay.h> 22 + #include <linux/fpga/fpga-mgr.h> 23 + #include <linux/io.h> 24 + #include <linux/module.h> 25 + #include <linux/of_address.h> 26 + #include <linux/regmap.h> 27 + 28 + #define A10_FPGAMGR_DCLKCNT_OFST 0x08 29 + #define A10_FPGAMGR_DCLKSTAT_OFST 0x0c 30 + #define A10_FPGAMGR_IMGCFG_CTL_00_OFST 0x70 31 + #define A10_FPGAMGR_IMGCFG_CTL_01_OFST 0x74 32 + #define A10_FPGAMGR_IMGCFG_CTL_02_OFST 0x78 33 + #define A10_FPGAMGR_IMGCFG_STAT_OFST 0x80 34 + 35 + #define A10_FPGAMGR_DCLKSTAT_DCLKDONE BIT(0) 36 + 37 + #define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG BIT(0) 38 + #define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS BIT(1) 39 + #define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE BIT(2) 40 + #define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG BIT(8) 41 + #define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NSTATUS_OE BIT(16) 42 + #define A10_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE BIT(24) 43 + 44 + #define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG BIT(0) 45 + #define A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST BIT(16) 46 + #define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE BIT(24) 47 + 48 + #define A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL BIT(0) 49 
+ #define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK (BIT(16) | BIT(17)) 50 + #define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT 16 51 + #define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH BIT(24) 52 + #define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT 24 53 + 54 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR BIT(0) 55 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_EARLY_USERMODE BIT(1) 56 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE BIT(2) 57 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN BIT(4) 58 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN BIT(6) 59 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY BIT(9) 60 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE BIT(10) 61 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR BIT(11) 62 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN BIT(12) 63 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK (BIT(16) | BIT(17) | BIT(18)) 64 + #define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT 16 65 + 66 + /* FPGA CD Ratio Value */ 67 + #define CDRATIO_x1 0x0 68 + #define CDRATIO_x2 0x1 69 + #define CDRATIO_x4 0x2 70 + #define CDRATIO_x8 0x3 71 + 72 + /* Configuration width 16/32 bit */ 73 + #define CFGWDTH_32 1 74 + #define CFGWDTH_16 0 75 + 76 + /* 77 + * struct a10_fpga_priv - private data for fpga manager 78 + * @regmap: regmap for register access 79 + * @fpga_data_addr: iomap for single address data register to FPGA 80 + * @clk: clock 81 + */ 82 + struct a10_fpga_priv { 83 + struct regmap *regmap; 84 + void __iomem *fpga_data_addr; 85 + struct clk *clk; 86 + }; 87 + 88 + static bool socfpga_a10_fpga_writeable_reg(struct device *dev, unsigned int reg) 89 + { 90 + switch (reg) { 91 + case A10_FPGAMGR_DCLKCNT_OFST: 92 + case A10_FPGAMGR_DCLKSTAT_OFST: 93 + case A10_FPGAMGR_IMGCFG_CTL_00_OFST: 94 + case A10_FPGAMGR_IMGCFG_CTL_01_OFST: 95 + case A10_FPGAMGR_IMGCFG_CTL_02_OFST: 96 + return true; 97 + } 98 + return false; 99 + } 100 + 101 + static bool socfpga_a10_fpga_readable_reg(struct device *dev, unsigned int reg) 102 + { 103 + switch (reg) { 104 + case 
A10_FPGAMGR_DCLKCNT_OFST: 105 + case A10_FPGAMGR_DCLKSTAT_OFST: 106 + case A10_FPGAMGR_IMGCFG_CTL_00_OFST: 107 + case A10_FPGAMGR_IMGCFG_CTL_01_OFST: 108 + case A10_FPGAMGR_IMGCFG_CTL_02_OFST: 109 + case A10_FPGAMGR_IMGCFG_STAT_OFST: 110 + return true; 111 + } 112 + return false; 113 + } 114 + 115 + static const struct regmap_config socfpga_a10_fpga_regmap_config = { 116 + .reg_bits = 32, 117 + .reg_stride = 4, 118 + .val_bits = 32, 119 + .writeable_reg = socfpga_a10_fpga_writeable_reg, 120 + .readable_reg = socfpga_a10_fpga_readable_reg, 121 + .max_register = A10_FPGAMGR_IMGCFG_STAT_OFST, 122 + .cache_type = REGCACHE_NONE, 123 + }; 124 + 125 + /* 126 + * from the register map description of cdratio in imgcfg_ctrl_02: 127 + * Normal Configuration : 32bit Passive Parallel 128 + * Partial Reconfiguration : 16bit Passive Parallel 129 + */ 130 + static void socfpga_a10_fpga_set_cfg_width(struct a10_fpga_priv *priv, 131 + int width) 132 + { 133 + width <<= A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT; 134 + 135 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST, 136 + A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH, width); 137 + } 138 + 139 + static void socfpga_a10_fpga_generate_dclks(struct a10_fpga_priv *priv, 140 + u32 count) 141 + { 142 + u32 val; 143 + 144 + /* Clear any existing DONE status. */ 145 + regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, 146 + A10_FPGAMGR_DCLKSTAT_DCLKDONE); 147 + 148 + /* Issue the DCLK regmap. */ 149 + regmap_write(priv->regmap, A10_FPGAMGR_DCLKCNT_OFST, count); 150 + 151 + /* wait till the dclkcnt done */ 152 + regmap_read_poll_timeout(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, val, 153 + val, 1, 100); 154 + 155 + /* Clear DONE status. 
*/ 156 + regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, 157 + A10_FPGAMGR_DCLKSTAT_DCLKDONE); 158 + } 159 + 160 + #define RBF_ENCRYPTION_MODE_OFFSET 69 161 + #define RBF_DECOMPRESS_OFFSET 229 162 + 163 + static int socfpga_a10_fpga_encrypted(u32 *buf32, size_t buf32_size) 164 + { 165 + if (buf32_size < RBF_ENCRYPTION_MODE_OFFSET + 1) 166 + return -EINVAL; 167 + 168 + /* Is the bitstream encrypted? */ 169 + return ((buf32[RBF_ENCRYPTION_MODE_OFFSET] >> 2) & 3) != 0; 170 + } 171 + 172 + static int socfpga_a10_fpga_compressed(u32 *buf32, size_t buf32_size) 173 + { 174 + if (buf32_size < RBF_DECOMPRESS_OFFSET + 1) 175 + return -EINVAL; 176 + 177 + /* Is the bitstream compressed? */ 178 + return !((buf32[RBF_DECOMPRESS_OFFSET] >> 1) & 1); 179 + } 180 + 181 + static unsigned int socfpga_a10_fpga_get_cd_ratio(unsigned int cfg_width, 182 + bool encrypt, bool compress) 183 + { 184 + unsigned int cd_ratio; 185 + 186 + /* 187 + * cd ratio is dependent on cfg width and whether the bitstream 188 + * is encrypted and/or compressed. 189 + * 190 + * | width | encr. | compr. 
| cd ratio | 191 + * | 16 | 0 | 0 | 1 | 192 + * | 16 | 0 | 1 | 4 | 193 + * | 16 | 1 | 0 | 2 | 194 + * | 16 | 1 | 1 | 4 | 195 + * | 32 | 0 | 0 | 1 | 196 + * | 32 | 0 | 1 | 8 | 197 + * | 32 | 1 | 0 | 4 | 198 + * | 32 | 1 | 1 | 8 | 199 + */ 200 + if (!compress && !encrypt) 201 + return CDRATIO_x1; 202 + 203 + if (compress) 204 + cd_ratio = CDRATIO_x4; 205 + else 206 + cd_ratio = CDRATIO_x2; 207 + 208 + /* If 32 bit, double the cd ratio by incrementing the field */ 209 + if (cfg_width == CFGWDTH_32) 210 + cd_ratio += 1; 211 + 212 + return cd_ratio; 213 + } 214 + 215 + static int socfpga_a10_fpga_set_cdratio(struct fpga_manager *mgr, 216 + unsigned int cfg_width, 217 + const char *buf, size_t count) 218 + { 219 + struct a10_fpga_priv *priv = mgr->priv; 220 + unsigned int cd_ratio; 221 + int encrypt, compress; 222 + 223 + encrypt = socfpga_a10_fpga_encrypted((u32 *)buf, count / 4); 224 + if (encrypt < 0) 225 + return -EINVAL; 226 + 227 + compress = socfpga_a10_fpga_compressed((u32 *)buf, count / 4); 228 + if (compress < 0) 229 + return -EINVAL; 230 + 231 + cd_ratio = socfpga_a10_fpga_get_cd_ratio(cfg_width, encrypt, compress); 232 + 233 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST, 234 + A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK, 235 + cd_ratio << A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT); 236 + 237 + return 0; 238 + } 239 + 240 + static u32 socfpga_a10_fpga_read_stat(struct a10_fpga_priv *priv) 241 + { 242 + u32 val; 243 + 244 + regmap_read(priv->regmap, A10_FPGAMGR_IMGCFG_STAT_OFST, &val); 245 + 246 + return val; 247 + } 248 + 249 + static int socfpga_a10_fpga_wait_for_pr_ready(struct a10_fpga_priv *priv) 250 + { 251 + u32 reg, i; 252 + 253 + for (i = 0; i < 10 ; i++) { 254 + reg = socfpga_a10_fpga_read_stat(priv); 255 + 256 + if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR) 257 + return -EINVAL; 258 + 259 + if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY) 260 + return 0; 261 + } 262 + 263 + return -ETIMEDOUT; 264 + } 265 + 266 + static int 
socfpga_a10_fpga_wait_for_pr_done(struct a10_fpga_priv *priv) 267 + { 268 + u32 reg, i; 269 + 270 + for (i = 0; i < 10 ; i++) { 271 + reg = socfpga_a10_fpga_read_stat(priv); 272 + 273 + if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR) 274 + return -EINVAL; 275 + 276 + if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE) 277 + return 0; 278 + } 279 + 280 + return -ETIMEDOUT; 281 + } 282 + 283 + /* Start the FPGA programming by initialize the FPGA Manager */ 284 + static int socfpga_a10_fpga_write_init(struct fpga_manager *mgr, 285 + struct fpga_image_info *info, 286 + const char *buf, size_t count) 287 + { 288 + struct a10_fpga_priv *priv = mgr->priv; 289 + unsigned int cfg_width; 290 + u32 msel, stat, mask; 291 + int ret; 292 + 293 + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) 294 + cfg_width = CFGWDTH_16; 295 + else 296 + return -EINVAL; 297 + 298 + /* Check for passive parallel (msel == 000 or 001) */ 299 + msel = socfpga_a10_fpga_read_stat(priv); 300 + msel &= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK; 301 + msel >>= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT; 302 + if ((msel != 0) && (msel != 1)) { 303 + dev_dbg(&mgr->dev, "Fail: invalid msel=%d\n", msel); 304 + return -EINVAL; 305 + } 306 + 307 + /* Make sure no external devices are interfering */ 308 + stat = socfpga_a10_fpga_read_stat(priv); 309 + mask = A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN | 310 + A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN; 311 + if ((stat & mask) != mask) 312 + return -EINVAL; 313 + 314 + /* Set cfg width */ 315 + socfpga_a10_fpga_set_cfg_width(priv, cfg_width); 316 + 317 + /* Determine cd ratio from bitstream header and set cd ratio */ 318 + ret = socfpga_a10_fpga_set_cdratio(mgr, cfg_width, buf, count); 319 + if (ret) 320 + return ret; 321 + 322 + /* 323 + * Clear s2f_nce to enable chip select. Leave pr_request 324 + * unasserted and override disabled. 
325 + */ 326 + regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST, 327 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG); 328 + 329 + /* Set cfg_ctrl to enable s2f dclk and data */ 330 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST, 331 + A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL, 332 + A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL); 333 + 334 + /* 335 + * Disable overrides not needed for pr. 336 + * s2f_config==1 leaves reset deasseted. 337 + */ 338 + regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_00_OFST, 339 + A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG | 340 + A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS | 341 + A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE | 342 + A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG); 343 + 344 + /* Enable override for data, dclk, nce, and pr_request to CSS */ 345 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST, 346 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG, 0); 347 + 348 + /* Send some clocks to clear out any errors */ 349 + socfpga_a10_fpga_generate_dclks(priv, 256); 350 + 351 + /* Assert pr_request */ 352 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST, 353 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST, 354 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST); 355 + 356 + /* Provide 2048 DCLKs before starting the config data streaming. 
*/ 357 + socfpga_a10_fpga_generate_dclks(priv, 0x7ff); 358 + 359 + /* Wait for pr_ready */ 360 + return socfpga_a10_fpga_wait_for_pr_ready(priv); 361 + } 362 + 363 + /* 364 + * write data to the FPGA data register 365 + */ 366 + static int socfpga_a10_fpga_write(struct fpga_manager *mgr, const char *buf, 367 + size_t count) 368 + { 369 + struct a10_fpga_priv *priv = mgr->priv; 370 + u32 *buffer_32 = (u32 *)buf; 371 + size_t i = 0; 372 + 373 + if (count <= 0) 374 + return -EINVAL; 375 + 376 + /* Write out the complete 32-bit chunks */ 377 + while (count >= sizeof(u32)) { 378 + writel(buffer_32[i++], priv->fpga_data_addr); 379 + count -= sizeof(u32); 380 + } 381 + 382 + /* Write out remaining non 32-bit chunks */ 383 + switch (count) { 384 + case 3: 385 + writel(buffer_32[i++] & 0x00ffffff, priv->fpga_data_addr); 386 + break; 387 + case 2: 388 + writel(buffer_32[i++] & 0x0000ffff, priv->fpga_data_addr); 389 + break; 390 + case 1: 391 + writel(buffer_32[i++] & 0x000000ff, priv->fpga_data_addr); 392 + break; 393 + case 0: 394 + break; 395 + default: 396 + /* This will never happen */ 397 + return -EFAULT; 398 + } 399 + 400 + return 0; 401 + } 402 + 403 + static int socfpga_a10_fpga_write_complete(struct fpga_manager *mgr, 404 + struct fpga_image_info *info) 405 + { 406 + struct a10_fpga_priv *priv = mgr->priv; 407 + u32 reg; 408 + int ret; 409 + 410 + /* Wait for pr_done */ 411 + ret = socfpga_a10_fpga_wait_for_pr_done(priv); 412 + 413 + /* Clear pr_request */ 414 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST, 415 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST, 0); 416 + 417 + /* Send some clocks to clear out any errors */ 418 + socfpga_a10_fpga_generate_dclks(priv, 256); 419 + 420 + /* Disable s2f dclk and data */ 421 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST, 422 + A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL, 0); 423 + 424 + /* Deassert chip select */ 425 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST, 426 + 
A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE, 427 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE); 428 + 429 + /* Disable data, dclk, nce, and pr_request override to CSS */ 430 + regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST, 431 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG, 432 + A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG); 433 + 434 + /* Return any errors regarding pr_done or pr_error */ 435 + if (ret) 436 + return ret; 437 + 438 + /* Final check */ 439 + reg = socfpga_a10_fpga_read_stat(priv); 440 + 441 + if (((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE) == 0) || 442 + ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN) == 0) || 443 + ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)) { 444 + dev_dbg(&mgr->dev, 445 + "Timeout in final check. Status=%08xf\n", reg); 446 + return -ETIMEDOUT; 447 + } 448 + 449 + return 0; 450 + } 451 + 452 + static enum fpga_mgr_states socfpga_a10_fpga_state(struct fpga_manager *mgr) 453 + { 454 + struct a10_fpga_priv *priv = mgr->priv; 455 + u32 reg = socfpga_a10_fpga_read_stat(priv); 456 + 457 + if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE) 458 + return FPGA_MGR_STATE_OPERATING; 459 + 460 + if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY) 461 + return FPGA_MGR_STATE_WRITE; 462 + 463 + if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR) 464 + return FPGA_MGR_STATE_WRITE_COMPLETE_ERR; 465 + 466 + if ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0) 467 + return FPGA_MGR_STATE_RESET; 468 + 469 + return FPGA_MGR_STATE_UNKNOWN; 470 + } 471 + 472 + static const struct fpga_manager_ops socfpga_a10_fpga_mgr_ops = { 473 + .initial_header_size = (RBF_DECOMPRESS_OFFSET + 1) * 4, 474 + .state = socfpga_a10_fpga_state, 475 + .write_init = socfpga_a10_fpga_write_init, 476 + .write = socfpga_a10_fpga_write, 477 + .write_complete = socfpga_a10_fpga_write_complete, 478 + }; 479 + 480 + static int socfpga_a10_fpga_probe(struct platform_device *pdev) 481 + { 482 + struct device *dev = &pdev->dev; 483 + struct a10_fpga_priv *priv; 484 + void 
__iomem *reg_base; 485 + struct resource *res; 486 + int ret; 487 + 488 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 489 + if (!priv) 490 + return -ENOMEM; 491 + 492 + /* First mmio base is for register access */ 493 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 494 + reg_base = devm_ioremap_resource(dev, res); 495 + if (IS_ERR(reg_base)) 496 + return PTR_ERR(reg_base); 497 + 498 + /* Second mmio base is for writing FPGA image data */ 499 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 500 + priv->fpga_data_addr = devm_ioremap_resource(dev, res); 501 + if (IS_ERR(priv->fpga_data_addr)) 502 + return PTR_ERR(priv->fpga_data_addr); 503 + 504 + /* regmap for register access */ 505 + priv->regmap = devm_regmap_init_mmio(dev, reg_base, 506 + &socfpga_a10_fpga_regmap_config); 507 + if (IS_ERR(priv->regmap)) 508 + return -ENODEV; 509 + 510 + priv->clk = devm_clk_get(dev, NULL); 511 + if (IS_ERR(priv->clk)) { 512 + dev_err(dev, "no clock specified\n"); 513 + return PTR_ERR(priv->clk); 514 + } 515 + 516 + ret = clk_prepare_enable(priv->clk); 517 + if (ret) { 518 + dev_err(dev, "could not enable clock\n"); 519 + return -EBUSY; 520 + } 521 + 522 + return fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager", 523 + &socfpga_a10_fpga_mgr_ops, priv); 524 + } 525 + 526 + static int socfpga_a10_fpga_remove(struct platform_device *pdev) 527 + { 528 + struct fpga_manager *mgr = platform_get_drvdata(pdev); 529 + struct a10_fpga_priv *priv = mgr->priv; 530 + 531 + fpga_mgr_unregister(&pdev->dev); 532 + clk_disable_unprepare(priv->clk); 533 + 534 + return 0; 535 + } 536 + 537 + static const struct of_device_id socfpga_a10_fpga_of_match[] = { 538 + { .compatible = "altr,socfpga-a10-fpga-mgr", }, 539 + {}, 540 + }; 541 + 542 + MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match); 543 + 544 + static struct platform_driver socfpga_a10_fpga_driver = { 545 + .probe = socfpga_a10_fpga_probe, 546 + .remove = socfpga_a10_fpga_remove, 547 + .driver = { 548 + .name = 
"socfpga_a10_fpga_manager", 549 + .of_match_table = socfpga_a10_fpga_of_match, 550 + }, 551 + }; 552 + 553 + module_platform_driver(socfpga_a10_fpga_driver); 554 + 555 + MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>"); 556 + MODULE_DESCRIPTION("SoCFPGA Arria10 FPGA Manager"); 557 + MODULE_LICENSE("GPL v2");
+4 -3
drivers/fpga/socfpga.c
··· 407 407 /* 408 408 * Prepare the FPGA to receive the configuration data. 409 409 */ 410 - static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr, u32 flags, 410 + static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr, 411 + struct fpga_image_info *info, 411 412 const char *buf, size_t count) 412 413 { 413 414 struct socfpga_fpga_priv *priv = mgr->priv; 414 415 int ret; 415 416 416 - if (flags & FPGA_MGR_PARTIAL_RECONFIG) { 417 + if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) { 417 418 dev_err(&mgr->dev, "Partial reconfiguration not supported.\n"); 418 419 return -EINVAL; 419 420 } ··· 479 478 } 480 479 481 480 static int socfpga_fpga_ops_configure_complete(struct fpga_manager *mgr, 482 - u32 flags) 481 + struct fpga_image_info *info) 483 482 { 484 483 struct socfpga_fpga_priv *priv = mgr->priv; 485 484 u32 status;
+29 -27
drivers/fpga/zynq-fpga.c
··· 118 118 #define FPGA_RST_NONE_MASK 0x0 119 119 120 120 struct zynq_fpga_priv { 121 - struct device *dev; 122 121 int irq; 123 122 struct clk *clk; 124 123 ··· 174 175 return IRQ_HANDLED; 175 176 } 176 177 177 - static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags, 178 + static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, 179 + struct fpga_image_info *info, 178 180 const char *buf, size_t count) 179 181 { 180 182 struct zynq_fpga_priv *priv; ··· 189 189 return err; 190 190 191 191 /* don't globally reset PL if we're doing partial reconfig */ 192 - if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) { 192 + if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) { 193 193 /* assert AXI interface resets */ 194 194 regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET, 195 195 FPGA_RST_ALL_MASK); ··· 217 217 INIT_POLL_DELAY, 218 218 INIT_POLL_TIMEOUT); 219 219 if (err) { 220 - dev_err(priv->dev, "Timeout waiting for PCFG_INIT"); 220 + dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n"); 221 221 goto out_err; 222 222 } 223 223 ··· 231 231 INIT_POLL_DELAY, 232 232 INIT_POLL_TIMEOUT); 233 233 if (err) { 234 - dev_err(priv->dev, "Timeout waiting for !PCFG_INIT"); 234 + dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n"); 235 235 goto out_err; 236 236 } 237 237 ··· 245 245 INIT_POLL_DELAY, 246 246 INIT_POLL_TIMEOUT); 247 247 if (err) { 248 - dev_err(priv->dev, "Timeout waiting for PCFG_INIT"); 248 + dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n"); 249 249 goto out_err; 250 250 } 251 251 } ··· 262 262 /* check that we have room in the command queue */ 263 263 status = zynq_fpga_read(priv, STATUS_OFFSET); 264 264 if (status & STATUS_DMA_Q_F) { 265 - dev_err(priv->dev, "DMA command queue full"); 265 + dev_err(&mgr->dev, "DMA command queue full\n"); 266 266 err = -EBUSY; 267 267 goto out_err; 268 268 } ··· 295 295 in_count = count; 296 296 priv = mgr->priv; 297 297 298 - kbuf = dma_alloc_coherent(priv->dev, count, &dma_addr, GFP_KERNEL); 298 + kbuf = 299 
+ dma_alloc_coherent(mgr->dev.parent, count, &dma_addr, GFP_KERNEL); 299 300 if (!kbuf) 300 301 return -ENOMEM; 301 302 ··· 332 331 zynq_fpga_write(priv, INT_STS_OFFSET, intr_status); 333 332 334 333 if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) { 335 - dev_err(priv->dev, "Error configuring FPGA"); 334 + dev_err(&mgr->dev, "Error configuring FPGA\n"); 336 335 err = -EFAULT; 337 336 } 338 337 339 338 clk_disable(priv->clk); 340 339 341 340 out_free: 342 - dma_free_coherent(priv->dev, in_count, kbuf, dma_addr); 343 - 341 + dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr); 344 342 return err; 345 343 } 346 344 347 - static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags) 345 + static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, 346 + struct fpga_image_info *info) 348 347 { 349 348 struct zynq_fpga_priv *priv = mgr->priv; 350 349 int err; ··· 365 364 return err; 366 365 367 366 /* for the partial reconfig case we didn't touch the level shifters */ 368 - if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) { 367 + if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) { 369 368 /* enable level shifters from PL to PS */ 370 369 regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET, 371 370 LVL_SHFTR_ENABLE_PL_TO_PS); ··· 417 416 if (!priv) 418 417 return -ENOMEM; 419 418 420 - priv->dev = dev; 421 - 422 419 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 423 420 priv->io_base = devm_ioremap_resource(dev, res); 424 421 if (IS_ERR(priv->io_base)) ··· 425 426 priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node, 426 427 "syscon"); 427 428 if (IS_ERR(priv->slcr)) { 428 - dev_err(dev, "unable to get zynq-slcr regmap"); 429 + dev_err(dev, "unable to get zynq-slcr regmap\n"); 429 430 return PTR_ERR(priv->slcr); 430 431 } 431 432 ··· 433 434 434 435 priv->irq = platform_get_irq(pdev, 0); 435 436 if (priv->irq < 0) { 436 - dev_err(dev, "No IRQ available"); 437 + dev_err(dev, "No IRQ available\n"); 437 438 return priv->irq; 438 - } 
439 - 440 - err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, 441 - dev_name(dev), priv); 442 - if (err) { 443 - dev_err(dev, "unable to request IRQ"); 444 - return err; 445 439 } 446 440 447 441 priv->clk = devm_clk_get(dev, "ref_clk"); 448 442 if (IS_ERR(priv->clk)) { 449 - dev_err(dev, "input clock not found"); 443 + dev_err(dev, "input clock not found\n"); 450 444 return PTR_ERR(priv->clk); 451 445 } 452 446 453 447 err = clk_prepare_enable(priv->clk); 454 448 if (err) { 455 - dev_err(dev, "unable to enable clock"); 449 + dev_err(dev, "unable to enable clock\n"); 456 450 return err; 457 451 } 458 452 459 453 /* unlock the device */ 460 454 zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK); 461 455 456 + zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF); 457 + zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK); 458 + err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev), 459 + priv); 460 + if (err) { 461 + dev_err(dev, "unable to request IRQ\n"); 462 + clk_disable_unprepare(priv->clk); 463 + return err; 464 + } 465 + 462 466 clk_disable(priv->clk); 463 467 464 468 err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager", 465 469 &zynq_fpga_ops, priv); 466 470 if (err) { 467 - dev_err(dev, "unable to register FPGA manager"); 471 + dev_err(dev, "unable to register FPGA manager\n"); 468 472 clk_unprepare(priv->clk); 469 473 return err; 470 474 }
+12 -81
drivers/hv/channel.c
··· 39 39 * vmbus_setevent- Trigger an event notification on the specified 40 40 * channel. 41 41 */ 42 - static void vmbus_setevent(struct vmbus_channel *channel) 42 + void vmbus_setevent(struct vmbus_channel *channel) 43 43 { 44 44 struct hv_monitor_page *monitorpage; 45 45 ··· 65 65 vmbus_set_event(channel); 66 66 } 67 67 } 68 + EXPORT_SYMBOL_GPL(vmbus_setevent); 68 69 69 70 /* 70 71 * vmbus_open - Open the specified channel. ··· 636 635 u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64)); 637 636 struct kvec bufferlist[3]; 638 637 u64 aligned_data = 0; 639 - int ret; 640 - bool signal = false; 641 638 bool lock = channel->acquire_ring_lock; 642 639 int num_vecs = ((bufferlen != 0) ? 3 : 1); 643 640 ··· 655 656 bufferlist[2].iov_base = &aligned_data; 656 657 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 657 658 658 - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, 659 - &signal, lock, channel->signal_policy); 659 + return hv_ringbuffer_write(channel, bufferlist, num_vecs, 660 + lock, kick_q); 660 661 661 - /* 662 - * Signalling the host is conditional on many factors: 663 - * 1. The ring state changed from being empty to non-empty. 664 - * This is tracked by the variable "signal". 665 - * 2. The variable kick_q tracks if more data will be placed 666 - * on the ring. We will not signal if more data is 667 - * to be placed. 668 - * 669 - * Based on the channel signal state, we will decide 670 - * which signaling policy will be applied. 671 - * 672 - * If we cannot write to the ring-buffer; signal the host 673 - * even if we may not have written anything. This is a rare 674 - * enough condition that it should not matter. 675 - * NOTE: in this case, the hvsock channel is an exception, because 676 - * it looks the host side's hvsock implementation has a throttling 677 - * mechanism which can hurt the performance otherwise. 
678 - */ 679 - 680 - if (((ret == 0) && kick_q && signal) || 681 - (ret && !is_hvsock_channel(channel))) 682 - vmbus_setevent(channel); 683 - 684 - return ret; 685 662 } 686 663 EXPORT_SYMBOL(vmbus_sendpacket_ctl); 687 664 ··· 698 723 u32 flags, 699 724 bool kick_q) 700 725 { 701 - int ret; 702 726 int i; 703 727 struct vmbus_channel_packet_page_buffer desc; 704 728 u32 descsize; ··· 705 731 u32 packetlen_aligned; 706 732 struct kvec bufferlist[3]; 707 733 u64 aligned_data = 0; 708 - bool signal = false; 709 734 bool lock = channel->acquire_ring_lock; 710 735 711 736 if (pagecount > MAX_PAGE_BUFFER_COUNT) ··· 742 769 bufferlist[2].iov_base = &aligned_data; 743 770 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 744 771 745 - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, 746 - &signal, lock, channel->signal_policy); 747 - 748 - /* 749 - * Signalling the host is conditional on many factors: 750 - * 1. The ring state changed from being empty to non-empty. 751 - * This is tracked by the variable "signal". 752 - * 2. The variable kick_q tracks if more data will be placed 753 - * on the ring. We will not signal if more data is 754 - * to be placed. 755 - * 756 - * Based on the channel signal state, we will decide 757 - * which signaling policy will be applied. 758 - * 759 - * If we cannot write to the ring-buffer; signal the host 760 - * even if we may not have written anything. This is a rare 761 - * enough condition that it should not matter. 
762 - */ 763 - 764 - if (((ret == 0) && kick_q && signal) || (ret)) 765 - vmbus_setevent(channel); 766 - 767 - return ret; 772 + return hv_ringbuffer_write(channel, bufferlist, 3, 773 + lock, kick_q); 768 774 } 769 775 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl); 770 776 ··· 774 822 u32 desc_size, 775 823 void *buffer, u32 bufferlen, u64 requestid) 776 824 { 777 - int ret; 778 825 u32 packetlen; 779 826 u32 packetlen_aligned; 780 827 struct kvec bufferlist[3]; 781 828 u64 aligned_data = 0; 782 - bool signal = false; 783 829 bool lock = channel->acquire_ring_lock; 784 830 785 831 packetlen = desc_size + bufferlen; ··· 798 848 bufferlist[2].iov_base = &aligned_data; 799 849 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 800 850 801 - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, 802 - &signal, lock, channel->signal_policy); 803 - 804 - if (ret == 0 && signal) 805 - vmbus_setevent(channel); 806 - 807 - return ret; 851 + return hv_ringbuffer_write(channel, bufferlist, 3, 852 + lock, true); 808 853 } 809 854 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc); 810 855 ··· 811 866 struct hv_multipage_buffer *multi_pagebuffer, 812 867 void *buffer, u32 bufferlen, u64 requestid) 813 868 { 814 - int ret; 815 869 struct vmbus_channel_packet_multipage_buffer desc; 816 870 u32 descsize; 817 871 u32 packetlen; 818 872 u32 packetlen_aligned; 819 873 struct kvec bufferlist[3]; 820 874 u64 aligned_data = 0; 821 - bool signal = false; 822 875 bool lock = channel->acquire_ring_lock; 823 876 u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, 824 877 multi_pagebuffer->len); ··· 856 913 bufferlist[2].iov_base = &aligned_data; 857 914 bufferlist[2].iov_len = (packetlen_aligned - packetlen); 858 915 859 - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, 860 - &signal, lock, channel->signal_policy); 861 - 862 - if (ret == 0 && signal) 863 - vmbus_setevent(channel); 864 - 865 - return ret; 916 + return hv_ringbuffer_write(channel, bufferlist, 3, 
917 + lock, true); 866 918 } 867 919 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer); 868 920 ··· 879 941 u32 bufferlen, u32 *buffer_actual_len, u64 *requestid, 880 942 bool raw) 881 943 { 882 - int ret; 883 - bool signal = false; 944 + return hv_ringbuffer_read(channel, buffer, bufferlen, 945 + buffer_actual_len, requestid, raw); 884 946 885 - ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen, 886 - buffer_actual_len, requestid, &signal, raw); 887 - 888 - if (signal) 889 - vmbus_setevent(channel); 890 - 891 - return ret; 892 947 } 893 948 894 949 int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
+5 -5
drivers/hv/channel_mgmt.c
··· 134 134 }, 135 135 136 136 /* Unknown GUID */ 137 - { .dev_type = HV_UNKOWN, 137 + { .dev_type = HV_UNKNOWN, 138 138 .perf_device = false, 139 139 }, 140 140 }; ··· 163 163 u16 i; 164 164 165 165 if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid)) 166 - return HV_UNKOWN; 166 + return HV_UNKNOWN; 167 167 168 - for (i = HV_IDE; i < HV_UNKOWN; i++) { 168 + for (i = HV_IDE; i < HV_UNKNOWN; i++) { 169 169 if (!uuid_le_cmp(*guid, vmbus_devs[i].guid)) 170 170 return i; 171 171 } ··· 389 389 { 390 390 struct vmbus_channel *channel, *tmp; 391 391 392 + mutex_lock(&vmbus_connection.channel_mutex); 392 393 list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list, 393 394 listentry) { 394 395 /* hv_process_channel_removal() needs this */ ··· 397 396 398 397 vmbus_device_unregister(channel->device_obj); 399 398 } 399 + mutex_unlock(&vmbus_connection.channel_mutex); 400 400 } 401 401 402 402 /* ··· 449 447 } 450 448 451 449 dev_type = hv_get_dev_type(newchannel); 452 - if (dev_type == HV_NIC) 453 - set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT); 454 450 455 451 init_vp_index(newchannel, dev_type); 456 452
+1
drivers/hv/connection.c
··· 39 39 .conn_state = DISCONNECTED, 40 40 .next_gpadl_handle = ATOMIC_INIT(0xE1E10), 41 41 }; 42 + EXPORT_SYMBOL_GPL(vmbus_connection); 42 43 43 44 /* 44 45 * Negotiated protocol version with the host.
+4 -2
drivers/hv/hv.c
··· 575 575 if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)) 576 576 return; 577 577 578 - for_each_online_cpu(cpu) 578 + for_each_present_cpu(cpu) 579 579 clockevents_unbind_device(hv_context.clk_evt[cpu], cpu); 580 580 } 581 581 ··· 594 594 return; 595 595 596 596 /* Turn off clockevent device */ 597 - if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) 597 + if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) { 598 + clockevents_unbind_device(hv_context.clk_evt[cpu], cpu); 598 599 hv_ce_shutdown(hv_context.clk_evt[cpu]); 600 + } 599 601 600 602 rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 601 603
+39 -5
drivers/hv/hv_balloon.c
··· 564 564 * next version to try. 565 565 */ 566 566 __u32 next_version; 567 + 568 + /* 569 + * The negotiated version agreed by host. 570 + */ 571 + __u32 version; 567 572 }; 568 573 569 574 static struct hv_dynmem_device dm_device; ··· 650 645 { 651 646 int i; 652 647 648 + pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn); 653 649 for (i = 0; i < size; i++) 654 650 hv_page_online_one(has, pfn_to_page(start_pfn + i)); 655 651 } ··· 691 685 (HA_CHUNK << PAGE_SHIFT)); 692 686 693 687 if (ret) { 694 - pr_info("hot_add memory failed error is %d\n", ret); 688 + pr_warn("hot_add memory failed error is %d\n", ret); 695 689 if (ret == -EEXIST) { 696 690 /* 697 691 * This error indicates that the error ··· 819 813 unsigned long pgs_ol = 0; 820 814 unsigned long old_covered_state; 821 815 unsigned long res = 0, flags; 816 + 817 + pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count, 818 + pg_start); 822 819 823 820 spin_lock_irqsave(&dm_device.ha_lock, flags); 824 821 list_for_each_entry(has, &dm_device.ha_region_list, list) { ··· 1034 1025 1035 1026 switch (info_hdr->type) { 1036 1027 case INFO_TYPE_MAX_PAGE_CNT: 1037 - pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n"); 1038 - pr_info("Data Size is %d\n", info_hdr->data_size); 1028 + if (info_hdr->data_size == sizeof(__u64)) { 1029 + __u64 *max_page_count = (__u64 *)&info_hdr[1]; 1030 + 1031 + pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n", 1032 + *max_page_count); 1033 + } 1034 + 1039 1035 break; 1040 1036 default: 1041 1037 pr_info("Received Unknown type: %d\n", info_hdr->type); ··· 1210 1196 return num_pages; 1211 1197 } 1212 1198 1213 - 1214 - 1215 1199 static void balloon_up(struct work_struct *dummy) 1216 1200 { 1217 1201 unsigned int num_pages = dm_device.balloon_wrk.num_pages; ··· 1236 1224 1237 1225 /* Refuse to balloon below the floor, keep the 2M granularity. 
*/ 1238 1226 if (avail_pages < num_pages || avail_pages - num_pages < floor) { 1227 + pr_warn("Balloon request will be partially fulfilled. %s\n", 1228 + avail_pages < num_pages ? "Not enough memory." : 1229 + "Balloon floor reached."); 1230 + 1239 1231 num_pages = avail_pages > floor ? (avail_pages - floor) : 0; 1240 1232 num_pages -= num_pages % PAGES_IN_2M; 1241 1233 } ··· 1261 1245 } 1262 1246 1263 1247 if (num_ballooned == 0 || num_ballooned == num_pages) { 1248 + pr_debug("Ballooned %u out of %u requested pages.\n", 1249 + num_pages, dm_device.balloon_wrk.num_pages); 1250 + 1264 1251 bl_resp->more_pages = 0; 1265 1252 done = true; 1266 1253 dm_device.state = DM_INITIALIZED; ··· 1311 1292 int range_count = req->range_count; 1312 1293 struct dm_unballoon_response resp; 1313 1294 int i; 1295 + unsigned int prev_pages_ballooned = dm->num_pages_ballooned; 1314 1296 1315 1297 for (i = 0; i < range_count; i++) { 1316 1298 free_balloon_pages(dm, &range_array[i]); 1317 1299 complete(&dm_device.config_event); 1318 1300 } 1301 + 1302 + pr_debug("Freed %u ballooned pages.\n", 1303 + prev_pages_ballooned - dm->num_pages_ballooned); 1319 1304 1320 1305 if (req->more_pages == 1) 1321 1306 return; ··· 1388 1365 version_req.hdr.size = sizeof(struct dm_version_request); 1389 1366 version_req.hdr.trans_id = atomic_inc_return(&trans_id); 1390 1367 version_req.version.version = dm->next_version; 1368 + dm->version = version_req.version.version; 1391 1369 1392 1370 /* 1393 1371 * Set the next version to try in case current version fails. ··· 1525 1501 struct dm_version_request version_req; 1526 1502 struct dm_capabilities cap_msg; 1527 1503 1504 + #ifdef CONFIG_MEMORY_HOTPLUG 1528 1505 do_hot_add = hot_add; 1506 + #else 1507 + do_hot_add = false; 1508 + #endif 1529 1509 1530 1510 /* 1531 1511 * First allocate a send buffer. 
··· 1581 1553 version_req.hdr.trans_id = atomic_inc_return(&trans_id); 1582 1554 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10; 1583 1555 version_req.is_last_attempt = 0; 1556 + dm_device.version = version_req.version.version; 1584 1557 1585 1558 ret = vmbus_sendpacket(dev->channel, &version_req, 1586 1559 sizeof(struct dm_version_request), ··· 1604 1575 ret = -ETIMEDOUT; 1605 1576 goto probe_error2; 1606 1577 } 1578 + 1579 + pr_info("Using Dynamic Memory protocol version %u.%u\n", 1580 + DYNMEM_MAJOR_VERSION(dm_device.version), 1581 + DYNMEM_MINOR_VERSION(dm_device.version)); 1582 + 1607 1583 /* 1608 1584 * Now submit our capabilities to the host. 1609 1585 */
+25 -8
drivers/hv/hv_snapshot.c
··· 31 31 #define VSS_MINOR 0 32 32 #define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR) 33 33 34 - #define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000)) 34 + /* 35 + * Timeout values are based on expecations from host 36 + */ 37 + #define VSS_FREEZE_TIMEOUT (15 * 60) 35 38 36 39 /* 37 40 * Global state maintained for transaction that is being processed. For a class ··· 123 120 default: 124 121 return -EINVAL; 125 122 } 126 - pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value); 123 + pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value); 127 124 return 0; 128 125 } 129 126 ··· 131 128 { 132 129 struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg; 133 130 134 - if (len != sizeof(*vss_msg)) 131 + if (len != sizeof(*vss_msg)) { 132 + pr_debug("VSS: Message size does not match length\n"); 135 133 return -EINVAL; 134 + } 136 135 137 136 if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER || 138 137 vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) { ··· 142 137 * Don't process registration messages if we're in the middle 143 138 * of a transaction processing. 144 139 */ 145 - if (vss_transaction.state > HVUTIL_READY) 140 + if (vss_transaction.state > HVUTIL_READY) { 141 + pr_debug("VSS: Got unexpected registration request\n"); 146 142 return -EINVAL; 143 + } 144 + 147 145 return vss_handle_handshake(vss_msg); 148 146 } else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) { 149 147 vss_transaction.state = HVUTIL_USERSPACE_RECV; ··· 163 155 } 164 156 } else { 165 157 /* This is a spurious call! */ 166 - pr_warn("VSS: Transaction not active\n"); 158 + pr_debug("VSS: Transaction not active\n"); 167 159 return -EINVAL; 168 160 } 169 161 return 0; ··· 176 168 struct hv_vss_msg *vss_msg; 177 169 178 170 /* The transaction state is wrong. 
*/ 179 - if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) 171 + if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) { 172 + pr_debug("VSS: Unexpected attempt to send to daemon\n"); 180 173 return; 174 + } 181 175 182 176 vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL); 183 177 if (!vss_msg) ··· 189 179 190 180 vss_transaction.state = HVUTIL_USERSPACE_REQ; 191 181 192 - schedule_delayed_work(&vss_timeout_work, VSS_USERSPACE_TIMEOUT); 182 + schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ? 183 + VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ); 193 184 194 185 rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL); 195 186 if (rc) { ··· 221 210 case VSS_OP_HOT_BACKUP: 222 211 if (vss_transaction.state < HVUTIL_READY) { 223 212 /* Userspace is not registered yet */ 213 + pr_debug("VSS: Not ready for request.\n"); 224 214 vss_respond_to_host(HV_E_FAIL); 225 215 return; 226 216 } 217 + 218 + pr_debug("VSS: Received request for op code: %d\n", 219 + vss_transaction.msg->vss_hdr.operation); 227 220 vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED; 228 221 vss_send_op(); 229 222 return; ··· 368 353 369 354 hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL, 370 355 vss_on_msg, vss_on_reset); 371 - if (!hvt) 356 + if (!hvt) { 357 + pr_warn("VSS: Failed to initialize transport\n"); 372 358 return -EFAULT; 359 + } 373 360 374 361 return 0; 375 362 }
+10 -7
drivers/hv/hv_util.c
··· 389 389 ts_srv_version = TS_VERSION_1; 390 390 hb_srv_version = HB_VERSION_1; 391 391 break; 392 - case(VERSION_WIN10): 393 - util_fw_version = UTIL_FW_VERSION; 394 - sd_srv_version = SD_VERSION; 395 - ts_srv_version = TS_VERSION; 396 - hb_srv_version = HB_VERSION; 397 - break; 398 - default: 392 + case VERSION_WIN7: 393 + case VERSION_WIN8: 394 + case VERSION_WIN8_1: 399 395 util_fw_version = UTIL_FW_VERSION; 400 396 sd_srv_version = SD_VERSION; 401 397 ts_srv_version = TS_VERSION_3; 398 + hb_srv_version = HB_VERSION; 399 + break; 400 + case VERSION_WIN10: 401 + default: 402 + util_fw_version = UTIL_FW_VERSION; 403 + sd_srv_version = SD_VERSION; 404 + ts_srv_version = TS_VERSION; 402 405 hb_srv_version = HB_VERSION; 403 406 } 404 407
+6 -6
drivers/hv/hyperv_vmbus.h
··· 38 38 /* 39 39 * Timeout for guest-host handshake for services. 40 40 */ 41 - #define HV_UTIL_NEGO_TIMEOUT 60 41 + #define HV_UTIL_NEGO_TIMEOUT 55 42 42 43 43 /* 44 44 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent ··· 527 527 528 528 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info); 529 529 530 - int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, 530 + int hv_ringbuffer_write(struct vmbus_channel *channel, 531 531 struct kvec *kv_list, 532 - u32 kv_count, bool *signal, bool lock, 533 - enum hv_signal_policy policy); 532 + u32 kv_count, bool lock, 533 + bool kick_q); 534 534 535 - int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, 535 + int hv_ringbuffer_read(struct vmbus_channel *channel, 536 536 void *buffer, u32 buflen, u32 *buffer_actual_len, 537 - u64 *requestid, bool *signal, bool raw); 537 + u64 *requestid, bool raw); 538 538 539 539 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 540 540 struct hv_ring_buffer_debug_info *debug_info);
+25 -19
drivers/hv/ring_buffer.c
··· 66 66 * once the ring buffer is empty, it will clear the 67 67 * interrupt_mask and re-check to see if new data has 68 68 * arrived. 69 + * 70 + * KYS: Oct. 30, 2016: 71 + * It looks like Windows hosts have logic to deal with DOS attacks that 72 + * can be triggered if it receives interrupts when it is not expecting 73 + * the interrupt. The host expects interrupts only when the ring 74 + * transitions from empty to non-empty (or full to non full on the guest 75 + * to host ring). 76 + * So, base the signaling decision solely on the ring state until the 77 + * host logic is fixed. 69 78 */ 70 79 71 - static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi, 72 - enum hv_signal_policy policy) 80 + static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel, 81 + bool kick_q) 73 82 { 83 + struct hv_ring_buffer_info *rbi = &channel->outbound; 84 + 74 85 virt_mb(); 75 86 if (READ_ONCE(rbi->ring_buffer->interrupt_mask)) 76 - return false; 77 - 78 - /* 79 - * When the client wants to control signaling, 80 - * we only honour the host interrupt mask. 81 - */ 82 - if (policy == HV_SIGNAL_POLICY_EXPLICIT) 83 - return true; 87 + return; 84 88 85 89 /* check interrupt_mask before read_index */ 86 90 virt_rmb(); ··· 93 89 * ring transitions from being empty to non-empty. 94 90 */ 95 91 if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) 96 - return true; 92 + vmbus_setevent(channel); 97 93 98 - return false; 94 + return; 99 95 } 100 96 101 97 /* Get the next write location for the specified ring buffer. */ ··· 284 280 } 285 281 286 282 /* Write to the ring buffer. 
*/ 287 - int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, 288 - struct kvec *kv_list, u32 kv_count, bool *signal, bool lock, 289 - enum hv_signal_policy policy) 283 + int hv_ringbuffer_write(struct vmbus_channel *channel, 284 + struct kvec *kv_list, u32 kv_count, bool lock, 285 + bool kick_q) 290 286 { 291 287 int i = 0; 292 288 u32 bytes_avail_towrite; ··· 296 292 u32 old_write; 297 293 u64 prev_indices = 0; 298 294 unsigned long flags = 0; 295 + struct hv_ring_buffer_info *outring_info = &channel->outbound; 299 296 300 297 for (i = 0; i < kv_count; i++) 301 298 totalbytes_towrite += kv_list[i].iov_len; ··· 349 344 if (lock) 350 345 spin_unlock_irqrestore(&outring_info->ring_lock, flags); 351 346 352 - *signal = hv_need_to_signal(old_write, outring_info, policy); 347 + hv_signal_on_write(old_write, channel, kick_q); 353 348 return 0; 354 349 } 355 350 356 - int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, 351 + int hv_ringbuffer_read(struct vmbus_channel *channel, 357 352 void *buffer, u32 buflen, u32 *buffer_actual_len, 358 - u64 *requestid, bool *signal, bool raw) 353 + u64 *requestid, bool raw) 359 354 { 360 355 u32 bytes_avail_toread; 361 356 u32 next_read_location = 0; ··· 364 359 u32 offset; 365 360 u32 packetlen; 366 361 int ret = 0; 362 + struct hv_ring_buffer_info *inring_info = &channel->inbound; 367 363 368 364 if (buflen <= 0) 369 365 return -EINVAL; ··· 422 416 /* Update the read index */ 423 417 hv_set_next_read_location(inring_info, next_read_location); 424 418 425 - *signal = hv_need_to_signal_on_read(inring_info); 419 + hv_signal_on_read(channel); 426 420 427 421 return ret; 428 422 }
+166 -8
drivers/hv/vmbus_drv.c
··· 45 45 #include <linux/random.h> 46 46 #include "hyperv_vmbus.h" 47 47 48 + struct vmbus_dynid { 49 + struct list_head node; 50 + struct hv_vmbus_device_id id; 51 + }; 52 + 48 53 static struct acpi_device *hv_acpi_dev; 49 54 50 55 static struct completion probe_event; ··· 505 500 static DEVICE_ATTR_RO(device); 506 501 507 502 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */ 508 - static struct attribute *vmbus_attrs[] = { 503 + static struct attribute *vmbus_dev_attrs[] = { 509 504 &dev_attr_id.attr, 510 505 &dev_attr_state.attr, 511 506 &dev_attr_monitor_id.attr, ··· 533 528 &dev_attr_device.attr, 534 529 NULL, 535 530 }; 536 - ATTRIBUTE_GROUPS(vmbus); 531 + ATTRIBUTE_GROUPS(vmbus_dev); 537 532 538 533 /* 539 534 * vmbus_uevent - add uevent for our device ··· 570 565 * Return a matching hv_vmbus_device_id pointer. 571 566 * If there is no match, return NULL. 572 567 */ 573 - static const struct hv_vmbus_device_id *hv_vmbus_get_id( 574 - const struct hv_vmbus_device_id *id, 568 + static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv, 575 569 const uuid_le *guid) 576 570 { 571 + const struct hv_vmbus_device_id *id = NULL; 572 + struct vmbus_dynid *dynid; 573 + 574 + /* Look at the dynamic ids first, before the static ones */ 575 + spin_lock(&drv->dynids.lock); 576 + list_for_each_entry(dynid, &drv->dynids.list, node) { 577 + if (!uuid_le_cmp(dynid->id.guid, *guid)) { 578 + id = &dynid->id; 579 + break; 580 + } 581 + } 582 + spin_unlock(&drv->dynids.lock); 583 + 584 + if (id) 585 + return id; 586 + 587 + id = drv->id_table; 588 + if (id == NULL) 589 + return NULL; /* empty device table */ 590 + 577 591 for (; !is_null_guid(&id->guid); id++) 578 592 if (!uuid_le_cmp(id->guid, *guid)) 579 593 return id; ··· 600 576 return NULL; 601 577 } 602 578 579 + /* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */ 580 + static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid) 581 + { 582 + 
struct vmbus_dynid *dynid; 583 + 584 + dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 585 + if (!dynid) 586 + return -ENOMEM; 587 + 588 + dynid->id.guid = *guid; 589 + 590 + spin_lock(&drv->dynids.lock); 591 + list_add_tail(&dynid->node, &drv->dynids.list); 592 + spin_unlock(&drv->dynids.lock); 593 + 594 + return driver_attach(&drv->driver); 595 + } 596 + 597 + static void vmbus_free_dynids(struct hv_driver *drv) 598 + { 599 + struct vmbus_dynid *dynid, *n; 600 + 601 + spin_lock(&drv->dynids.lock); 602 + list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { 603 + list_del(&dynid->node); 604 + kfree(dynid); 605 + } 606 + spin_unlock(&drv->dynids.lock); 607 + } 608 + 609 + /* Parse string of form: 1b4e28ba-2fa1-11d2-883f-b9a761bde3f */ 610 + static int get_uuid_le(const char *str, uuid_le *uu) 611 + { 612 + unsigned int b[16]; 613 + int i; 614 + 615 + if (strlen(str) < 37) 616 + return -1; 617 + 618 + for (i = 0; i < 36; i++) { 619 + switch (i) { 620 + case 8: case 13: case 18: case 23: 621 + if (str[i] != '-') 622 + return -1; 623 + break; 624 + default: 625 + if (!isxdigit(str[i])) 626 + return -1; 627 + } 628 + } 629 + 630 + /* unparse little endian output byte order */ 631 + if (sscanf(str, 632 + "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x", 633 + &b[3], &b[2], &b[1], &b[0], 634 + &b[5], &b[4], &b[7], &b[6], &b[8], &b[9], 635 + &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16) 636 + return -1; 637 + 638 + for (i = 0; i < 16; i++) 639 + uu->b[i] = b[i]; 640 + return 0; 641 + } 642 + 643 + /* 644 + * store_new_id - sysfs frontend to vmbus_add_dynid() 645 + * 646 + * Allow GUIDs to be added to an existing driver via sysfs. 
647 + */ 648 + static ssize_t new_id_store(struct device_driver *driver, const char *buf, 649 + size_t count) 650 + { 651 + struct hv_driver *drv = drv_to_hv_drv(driver); 652 + uuid_le guid = NULL_UUID_LE; 653 + ssize_t retval; 654 + 655 + if (get_uuid_le(buf, &guid) != 0) 656 + return -EINVAL; 657 + 658 + if (hv_vmbus_get_id(drv, &guid)) 659 + return -EEXIST; 660 + 661 + retval = vmbus_add_dynid(drv, &guid); 662 + if (retval) 663 + return retval; 664 + return count; 665 + } 666 + static DRIVER_ATTR_WO(new_id); 667 + 668 + /* 669 + * store_remove_id - remove a PCI device ID from this driver 670 + * 671 + * Removes a dynamic pci device ID to this driver. 672 + */ 673 + static ssize_t remove_id_store(struct device_driver *driver, const char *buf, 674 + size_t count) 675 + { 676 + struct hv_driver *drv = drv_to_hv_drv(driver); 677 + struct vmbus_dynid *dynid, *n; 678 + uuid_le guid = NULL_UUID_LE; 679 + size_t retval = -ENODEV; 680 + 681 + if (get_uuid_le(buf, &guid)) 682 + return -EINVAL; 683 + 684 + spin_lock(&drv->dynids.lock); 685 + list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { 686 + struct hv_vmbus_device_id *id = &dynid->id; 687 + 688 + if (!uuid_le_cmp(id->guid, guid)) { 689 + list_del(&dynid->node); 690 + kfree(dynid); 691 + retval = count; 692 + break; 693 + } 694 + } 695 + spin_unlock(&drv->dynids.lock); 696 + 697 + return retval; 698 + } 699 + static DRIVER_ATTR_WO(remove_id); 700 + 701 + static struct attribute *vmbus_drv_attrs[] = { 702 + &driver_attr_new_id.attr, 703 + &driver_attr_remove_id.attr, 704 + NULL, 705 + }; 706 + ATTRIBUTE_GROUPS(vmbus_drv); 603 707 604 708 605 709 /* ··· 742 590 if (is_hvsock_channel(hv_dev->channel)) 743 591 return drv->hvsock; 744 592 745 - if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type)) 593 + if (hv_vmbus_get_id(drv, &hv_dev->dev_type)) 746 594 return 1; 747 595 748 596 return 0; ··· 759 607 struct hv_device *dev = device_to_hv_device(child_device); 760 608 const struct hv_vmbus_device_id *dev_id; 
761 609 762 - dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type); 610 + dev_id = hv_vmbus_get_id(drv, &dev->dev_type); 763 611 if (drv->probe) { 764 612 ret = drv->probe(dev, dev_id); 765 613 if (ret != 0) ··· 836 684 .remove = vmbus_remove, 837 685 .probe = vmbus_probe, 838 686 .uevent = vmbus_uevent, 839 - .dev_groups = vmbus_groups, 687 + .dev_groups = vmbus_dev_groups, 688 + .drv_groups = vmbus_drv_groups, 840 689 }; 841 690 842 691 struct onmessage_work_context { ··· 1058 905 hv_driver->driver.mod_name = mod_name; 1059 906 hv_driver->driver.bus = &hv_bus; 1060 907 908 + spin_lock_init(&hv_driver->dynids.lock); 909 + INIT_LIST_HEAD(&hv_driver->dynids.list); 910 + 1061 911 ret = driver_register(&hv_driver->driver); 1062 912 1063 913 return ret; ··· 1079 923 { 1080 924 pr_info("unregistering driver %s\n", hv_driver->name); 1081 925 1082 - if (!vmbus_exists()) 926 + if (!vmbus_exists()) { 1083 927 driver_unregister(&hv_driver->driver); 928 + vmbus_free_dynids(hv_driver); 929 + } 1084 930 } 1085 931 EXPORT_SYMBOL_GPL(vmbus_driver_unregister); 1086 932
+16 -15
drivers/hwtracing/coresight/coresight-etm-perf.c
··· 202 202 if (!event_data) 203 203 return NULL; 204 204 205 + /* 206 + * In theory nothing prevent tracers in a trace session from being 207 + * associated with different sinks, nor having a sink per tracer. But 208 + * until we have HW with this kind of topology we need to assume tracers 209 + * in a trace session are using the same sink. Therefore go through 210 + * the coresight bus and pick the first enabled sink. 211 + * 212 + * When operated from sysFS users are responsible to enable the sink 213 + * while from perf, the perf tools will do it based on the choice made 214 + * on the cmd line. As such the "enable_sink" flag in sysFS is reset. 215 + */ 216 + sink = coresight_get_enabled_sink(true); 217 + if (!sink) 218 + goto err; 219 + 205 220 INIT_WORK(&event_data->work, free_event_data); 206 221 207 222 mask = &event_data->mask; ··· 234 219 * list of devices from source to sink that can be 235 220 * referenced later when the path is actually needed. 236 221 */ 237 - event_data->path[cpu] = coresight_build_path(csdev); 222 + event_data->path[cpu] = coresight_build_path(csdev, sink); 238 223 if (IS_ERR(event_data->path[cpu])) 239 224 goto err; 240 225 } 241 - 242 - /* 243 - * In theory nothing prevent tracers in a trace session from being 244 - * associated with different sinks, nor having a sink per tracer. But 245 - * until we have HW with this kind of topology and a way to convey 246 - * sink assignement from the perf cmd line we need to assume tracers 247 - * in a trace session are using the same sink. Therefore pick the sink 248 - * found at the end of the first available path. 249 - */ 250 - cpu = cpumask_first(mask); 251 - /* Grab the sink at the end of the path */ 252 - sink = coresight_get_sink(event_data->path[cpu]); 253 - if (!sink) 254 - goto err; 255 226 256 227 if (!sink_ops(sink)->alloc_buffer) 257 228 goto err;
+5
drivers/hwtracing/coresight/coresight-etm.h
··· 89 89 /* ETMCR - 0x00 */ 90 90 #define ETMCR_PWD_DWN BIT(0) 91 91 #define ETMCR_STALL_MODE BIT(7) 92 + #define ETMCR_BRANCH_BROADCAST BIT(8) 92 93 #define ETMCR_ETM_PRG BIT(10) 93 94 #define ETMCR_ETM_EN BIT(11) 94 95 #define ETMCR_CYC_ACC BIT(12) 95 96 #define ETMCR_CTXID_SIZE (BIT(14)|BIT(15)) 96 97 #define ETMCR_TIMESTAMP_EN BIT(28) 98 + #define ETMCR_RETURN_STACK BIT(29) 97 99 /* ETMCCR - 0x04 */ 98 100 #define ETMCCR_FIFOFULL BIT(23) 99 101 /* ETMPDCR - 0x310 */ ··· 112 110 #define ETM_MODE_STALL BIT(2) 113 111 #define ETM_MODE_TIMESTAMP BIT(3) 114 112 #define ETM_MODE_CTXID BIT(4) 113 + #define ETM_MODE_BBROAD BIT(5) 114 + #define ETM_MODE_RET_STACK BIT(6) 115 115 #define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \ 116 116 ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \ 117 + ETM_MODE_BBROAD | ETM_MODE_RET_STACK | \ 117 118 ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \ 118 119 ETM_MODE_EXCL_USER) 119 120
+11 -1
drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
··· 146 146 goto err_unlock; 147 147 } 148 148 config->ctrl |= ETMCR_STALL_MODE; 149 - } else 149 + } else 150 150 config->ctrl &= ~ETMCR_STALL_MODE; 151 151 152 152 if (config->mode & ETM_MODE_TIMESTAMP) { ··· 163 163 config->ctrl |= ETMCR_CTXID_SIZE; 164 164 else 165 165 config->ctrl &= ~ETMCR_CTXID_SIZE; 166 + 167 + if (config->mode & ETM_MODE_BBROAD) 168 + config->ctrl |= ETMCR_BRANCH_BROADCAST; 169 + else 170 + config->ctrl &= ~ETMCR_BRANCH_BROADCAST; 171 + 172 + if (config->mode & ETM_MODE_RET_STACK) 173 + config->ctrl |= ETMCR_RETURN_STACK; 174 + else 175 + config->ctrl &= ~ETMCR_RETURN_STACK; 166 176 167 177 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) 168 178 etm_config_trace_mode(config);
+3 -1
drivers/hwtracing/coresight/coresight-priv.h
··· 111 111 void coresight_disable_path(struct list_head *path); 112 112 int coresight_enable_path(struct list_head *path, u32 mode); 113 113 struct coresight_device *coresight_get_sink(struct list_head *path); 114 - struct list_head *coresight_build_path(struct coresight_device *csdev); 114 + struct coresight_device *coresight_get_enabled_sink(bool reset); 115 + struct list_head *coresight_build_path(struct coresight_device *csdev, 116 + struct coresight_device *sink); 115 117 void coresight_release_path(struct list_head *path); 116 118 117 119 #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
+7 -2
drivers/hwtracing/coresight/coresight-stm.c
··· 419 419 struct stm_drvdata, stm); 420 420 421 421 if (!(drvdata && local_read(&drvdata->mode))) 422 - return 0; 422 + return -EACCES; 423 423 424 424 if (channel >= drvdata->numsp) 425 - return 0; 425 + return -EINVAL; 426 426 427 427 ch_addr = (unsigned long)stm_channel_addr(drvdata, channel); 428 428 ··· 919 919 .id = 0x0003b962, 920 920 .mask = 0x0003ffff, 921 921 .data = "STM32", 922 + }, 923 + { 924 + .id = 0x0003b963, 925 + .mask = 0x0003ffff, 926 + .data = "STM500", 922 927 }, 923 928 { 0, 0}, 924 929 };
+18 -30
drivers/hwtracing/coresight/coresight-tmc-etf.c
··· 70 70 * When operating in sysFS mode the content of the buffer needs to be 71 71 * read before the TMC is disabled. 72 72 */ 73 - if (local_read(&drvdata->mode) == CS_MODE_SYSFS) 73 + if (drvdata->mode == CS_MODE_SYSFS) 74 74 tmc_etb_dump_hw(drvdata); 75 75 tmc_disable_hw(drvdata); 76 76 ··· 103 103 CS_LOCK(drvdata->base); 104 104 } 105 105 106 - static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode) 106 + static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) 107 107 { 108 108 int ret = 0; 109 109 bool used = false; 110 110 char *buf = NULL; 111 - long val; 112 111 unsigned long flags; 113 112 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 114 - 115 - /* This shouldn't be happening */ 116 - if (WARN_ON(mode != CS_MODE_SYSFS)) 117 - return -EINVAL; 118 113 119 114 /* 120 115 * If we don't have a buffer release the lock and allocate memory. ··· 133 138 goto out; 134 139 } 135 140 136 - val = local_xchg(&drvdata->mode, mode); 137 141 /* 138 142 * In sysFS mode we can have multiple writers per sink. Since this 139 143 * sink is already enabled no memory is needed and the HW need not be 140 144 * touched. 
141 145 */ 142 - if (val == CS_MODE_SYSFS) 146 + if (drvdata->mode == CS_MODE_SYSFS) 143 147 goto out; 144 148 145 149 /* ··· 157 163 drvdata->buf = buf; 158 164 } 159 165 166 + drvdata->mode = CS_MODE_SYSFS; 160 167 tmc_etb_enable_hw(drvdata); 161 168 out: 162 169 spin_unlock_irqrestore(&drvdata->spinlock, flags); ··· 172 177 return ret; 173 178 } 174 179 175 - static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode) 180 + static int tmc_enable_etf_sink_perf(struct coresight_device *csdev) 176 181 { 177 182 int ret = 0; 178 - long val; 179 183 unsigned long flags; 180 184 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 181 - 182 - /* This shouldn't be happening */ 183 - if (WARN_ON(mode != CS_MODE_PERF)) 184 - return -EINVAL; 185 185 186 186 spin_lock_irqsave(&drvdata->spinlock, flags); 187 187 if (drvdata->reading) { ··· 184 194 goto out; 185 195 } 186 196 187 - val = local_xchg(&drvdata->mode, mode); 188 197 /* 189 198 * In Perf mode there can be only one writer per sink. There 190 199 * is also no need to continue if the ETB/ETR is already operated 191 200 * from sysFS. 
192 201 */ 193 - if (val != CS_MODE_DISABLED) { 202 + if (drvdata->mode != CS_MODE_DISABLED) { 194 203 ret = -EINVAL; 195 204 goto out; 196 205 } 197 206 207 + drvdata->mode = CS_MODE_PERF; 198 208 tmc_etb_enable_hw(drvdata); 199 209 out: 200 210 spin_unlock_irqrestore(&drvdata->spinlock, flags); ··· 206 216 { 207 217 switch (mode) { 208 218 case CS_MODE_SYSFS: 209 - return tmc_enable_etf_sink_sysfs(csdev, mode); 219 + return tmc_enable_etf_sink_sysfs(csdev); 210 220 case CS_MODE_PERF: 211 - return tmc_enable_etf_sink_perf(csdev, mode); 221 + return tmc_enable_etf_sink_perf(csdev); 212 222 } 213 223 214 224 /* We shouldn't be here */ ··· 217 227 218 228 static void tmc_disable_etf_sink(struct coresight_device *csdev) 219 229 { 220 - long val; 221 230 unsigned long flags; 222 231 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 223 232 ··· 226 237 return; 227 238 } 228 239 229 - val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); 230 240 /* Disable the TMC only if it needs to */ 231 - if (val != CS_MODE_DISABLED) 241 + if (drvdata->mode != CS_MODE_DISABLED) { 232 242 tmc_etb_disable_hw(drvdata); 243 + drvdata->mode = CS_MODE_DISABLED; 244 + } 233 245 234 246 spin_unlock_irqrestore(&drvdata->spinlock, flags); 235 247 ··· 250 260 } 251 261 252 262 tmc_etf_enable_hw(drvdata); 253 - local_set(&drvdata->mode, CS_MODE_SYSFS); 263 + drvdata->mode = CS_MODE_SYSFS; 254 264 spin_unlock_irqrestore(&drvdata->spinlock, flags); 255 265 256 266 dev_info(drvdata->dev, "TMC-ETF enabled\n"); ··· 270 280 } 271 281 272 282 tmc_etf_disable_hw(drvdata); 273 - local_set(&drvdata->mode, CS_MODE_DISABLED); 283 + drvdata->mode = CS_MODE_DISABLED; 274 284 spin_unlock_irqrestore(&drvdata->spinlock, flags); 275 285 276 286 dev_info(drvdata->dev, "TMC disabled\n"); ··· 373 383 return; 374 384 375 385 /* This shouldn't happen */ 376 - if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF)) 386 + if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF)) 377 387 return; 378 388 379 
389 CS_UNLOCK(drvdata->base); ··· 494 504 495 505 int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) 496 506 { 497 - long val; 498 507 enum tmc_mode mode; 499 508 int ret = 0; 500 509 unsigned long flags; ··· 517 528 goto out; 518 529 } 519 530 520 - val = local_read(&drvdata->mode); 521 531 /* Don't interfere if operated from Perf */ 522 - if (val == CS_MODE_PERF) { 532 + if (drvdata->mode == CS_MODE_PERF) { 523 533 ret = -EINVAL; 524 534 goto out; 525 535 } ··· 530 542 } 531 543 532 544 /* Disable the TMC if need be */ 533 - if (val == CS_MODE_SYSFS) 545 + if (drvdata->mode == CS_MODE_SYSFS) 534 546 tmc_etb_disable_hw(drvdata); 535 547 536 548 drvdata->reading = true; ··· 561 573 } 562 574 563 575 /* Re-enable the TMC if need be */ 564 - if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { 576 + if (drvdata->mode == CS_MODE_SYSFS) { 565 577 /* 566 578 * The trace run will continue with the same allocated trace 567 579 * buffer. As such zero-out the buffer so that we don't end
+15 -28
drivers/hwtracing/coresight/coresight-tmc-etr.c
··· 86 86 * When operating in sysFS mode the content of the buffer needs to be 87 87 * read before the TMC is disabled. 88 88 */ 89 - if (local_read(&drvdata->mode) == CS_MODE_SYSFS) 89 + if (drvdata->mode == CS_MODE_SYSFS) 90 90 tmc_etr_dump_hw(drvdata); 91 91 tmc_disable_hw(drvdata); 92 92 93 93 CS_LOCK(drvdata->base); 94 94 } 95 95 96 - static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode) 96 + static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) 97 97 { 98 98 int ret = 0; 99 99 bool used = false; 100 - long val; 101 100 unsigned long flags; 102 101 void __iomem *vaddr = NULL; 103 102 dma_addr_t paddr; 104 103 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 105 104 106 - /* This shouldn't be happening */ 107 - if (WARN_ON(mode != CS_MODE_SYSFS)) 108 - return -EINVAL; 109 105 110 106 /* 111 107 * If we don't have a buffer release the lock and allocate memory. ··· 130 134 goto out; 131 135 } 132 136 133 - val = local_xchg(&drvdata->mode, mode); 134 137 /* 135 138 * In sysFS mode we can have multiple writers per sink. Since this 136 139 * sink is already enabled no memory is needed and the HW need not be 137 140 * touched. 
138 141 */ 139 - if (val == CS_MODE_SYSFS) 142 + if (drvdata->mode == CS_MODE_SYSFS) 140 143 goto out; 141 144 142 145 /* ··· 150 155 drvdata->buf = drvdata->vaddr; 151 156 } 152 157 153 - memset(drvdata->vaddr, 0, drvdata->size); 154 - 158 + drvdata->mode = CS_MODE_SYSFS; 155 159 tmc_etr_enable_hw(drvdata); 156 160 out: 157 161 spin_unlock_irqrestore(&drvdata->spinlock, flags); ··· 165 171 return ret; 166 172 } 167 173 168 - static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode) 174 + static int tmc_enable_etr_sink_perf(struct coresight_device *csdev) 169 175 { 170 176 int ret = 0; 171 - long val; 172 177 unsigned long flags; 173 178 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 174 - 175 - /* This shouldn't be happening */ 176 - if (WARN_ON(mode != CS_MODE_PERF)) 177 - return -EINVAL; 178 179 179 180 spin_lock_irqsave(&drvdata->spinlock, flags); 180 181 if (drvdata->reading) { ··· 177 188 goto out; 178 189 } 179 190 180 - val = local_xchg(&drvdata->mode, mode); 181 191 /* 182 192 * In Perf mode there can be only one writer per sink. There 183 193 * is also no need to continue if the ETR is already operated 184 194 * from sysFS. 
185 195 */ 186 - if (val != CS_MODE_DISABLED) { 196 + if (drvdata->mode != CS_MODE_DISABLED) { 187 197 ret = -EINVAL; 188 198 goto out; 189 199 } 190 200 201 + drvdata->mode = CS_MODE_PERF; 191 202 tmc_etr_enable_hw(drvdata); 192 203 out: 193 204 spin_unlock_irqrestore(&drvdata->spinlock, flags); ··· 199 210 { 200 211 switch (mode) { 201 212 case CS_MODE_SYSFS: 202 - return tmc_enable_etr_sink_sysfs(csdev, mode); 213 + return tmc_enable_etr_sink_sysfs(csdev); 203 214 case CS_MODE_PERF: 204 - return tmc_enable_etr_sink_perf(csdev, mode); 215 + return tmc_enable_etr_sink_perf(csdev); 205 216 } 206 217 207 218 /* We shouldn't be here */ ··· 210 221 211 222 static void tmc_disable_etr_sink(struct coresight_device *csdev) 212 223 { 213 - long val; 214 224 unsigned long flags; 215 225 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 216 226 ··· 219 231 return; 220 232 } 221 233 222 - val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); 223 234 /* Disable the TMC only if it needs to */ 224 - if (val != CS_MODE_DISABLED) 235 + if (drvdata->mode != CS_MODE_DISABLED) { 225 236 tmc_etr_disable_hw(drvdata); 237 + drvdata->mode = CS_MODE_DISABLED; 238 + } 226 239 227 240 spin_unlock_irqrestore(&drvdata->spinlock, flags); 228 241 ··· 242 253 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) 243 254 { 244 255 int ret = 0; 245 - long val; 246 256 unsigned long flags; 247 257 248 258 /* config types are set a boot time and never change */ ··· 254 266 goto out; 255 267 } 256 268 257 - val = local_read(&drvdata->mode); 258 269 /* Don't interfere if operated from Perf */ 259 - if (val == CS_MODE_PERF) { 270 + if (drvdata->mode == CS_MODE_PERF) { 260 271 ret = -EINVAL; 261 272 goto out; 262 273 } ··· 267 280 } 268 281 269 282 /* Disable the TMC if need be */ 270 - if (val == CS_MODE_SYSFS) 283 + if (drvdata->mode == CS_MODE_SYSFS) 271 284 tmc_etr_disable_hw(drvdata); 272 285 273 286 drvdata->reading = true; ··· 290 303 spin_lock_irqsave(&drvdata->spinlock, flags); 
291 304 292 305 /* RE-enable the TMC if need be */ 293 - if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { 306 + if (drvdata->mode == CS_MODE_SYSFS) { 294 307 /* 295 308 * The trace run will continue with the same allocated trace 296 309 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
+1 -1
drivers/hwtracing/coresight/coresight-tmc.h
··· 117 117 void __iomem *vaddr; 118 118 u32 size; 119 119 u32 len; 120 - local_t mode; 120 + u32 mode; 121 121 enum tmc_config_type config_type; 122 122 enum tmc_mem_intf_width memwidth; 123 123 u32 trigger_cntr;
+68 -6
drivers/hwtracing/coresight/coresight.c
··· 368 368 return csdev; 369 369 } 370 370 371 + static int coresight_enabled_sink(struct device *dev, void *data) 372 + { 373 + bool *reset = data; 374 + struct coresight_device *csdev = to_coresight_device(dev); 375 + 376 + if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || 377 + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && 378 + csdev->activated) { 379 + /* 380 + * Now that we have a handle on the sink for this session, 381 + * disable the sysFS "enable_sink" flag so that possible 382 + * concurrent perf session that wish to use another sink don't 383 + * trip on it. Doing so has no ramification for the current 384 + * session. 385 + */ 386 + if (*reset) 387 + csdev->activated = false; 388 + 389 + return 1; 390 + } 391 + 392 + return 0; 393 + } 394 + 395 + /** 396 + * coresight_get_enabled_sink - returns the first enabled sink found on the bus 397 + * @deactivate: Whether the 'enable_sink' flag should be reset 398 + * 399 + * When operated from perf the deactivate parameter should be set to 'true'. 400 + * That way the "enabled_sink" flag of the sink that was selected can be reset, 401 + * allowing for other concurrent perf sessions to choose a different sink. 402 + * 403 + * When operated from sysFS users have full control and as such the deactivate 404 + * parameter should be set to 'false', hence mandating users to explicitly 405 + * clear the flag. 406 + */ 407 + struct coresight_device *coresight_get_enabled_sink(bool deactivate) 408 + { 409 + struct device *dev = NULL; 410 + 411 + dev = bus_find_device(&coresight_bustype, NULL, &deactivate, 412 + coresight_enabled_sink); 413 + 414 + return dev ? to_coresight_device(dev) : NULL; 415 + } 416 + 371 417 /** 372 418 * _coresight_build_path - recursively build a path from a @csdev to a sink. 373 419 * @csdev: The device to start from. ··· 426 380 * last one. 
427 381 */ 428 382 static int _coresight_build_path(struct coresight_device *csdev, 383 + struct coresight_device *sink, 429 384 struct list_head *path) 430 385 { 431 386 int i; ··· 434 387 struct coresight_node *node; 435 388 436 389 /* An activated sink has been found. Enqueue the element */ 437 - if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || 438 - csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated) 390 + if (csdev == sink) 439 391 goto out; 440 392 441 393 /* Not a sink - recursively explore each port found on this element */ 442 394 for (i = 0; i < csdev->nr_outport; i++) { 443 395 struct coresight_device *child_dev = csdev->conns[i].child_dev; 444 396 445 - if (child_dev && _coresight_build_path(child_dev, path) == 0) { 397 + if (child_dev && 398 + _coresight_build_path(child_dev, sink, path) == 0) { 446 399 found = true; 447 400 break; 448 401 } ··· 469 422 return 0; 470 423 } 471 424 472 - struct list_head *coresight_build_path(struct coresight_device *csdev) 425 + struct list_head *coresight_build_path(struct coresight_device *source, 426 + struct coresight_device *sink) 473 427 { 474 428 struct list_head *path; 475 429 int rc; 430 + 431 + if (!sink) 432 + return ERR_PTR(-EINVAL); 476 433 477 434 path = kzalloc(sizeof(struct list_head), GFP_KERNEL); 478 435 if (!path) ··· 484 433 485 434 INIT_LIST_HEAD(path); 486 435 487 - rc = _coresight_build_path(csdev, path); 436 + rc = _coresight_build_path(source, sink, path); 488 437 if (rc) { 489 438 kfree(path); 490 439 return ERR_PTR(rc); ··· 548 497 int coresight_enable(struct coresight_device *csdev) 549 498 { 550 499 int cpu, ret = 0; 500 + struct coresight_device *sink; 551 501 struct list_head *path; 552 502 553 503 mutex_lock(&coresight_mutex); ··· 560 508 if (csdev->enable) 561 509 goto out; 562 510 563 - path = coresight_build_path(csdev); 511 + /* 512 + * Search for a valid sink for this session but don't reset the 513 + * "enable_sink" flag in sysFS. Users get to do that explicitly. 
514 + */ 515 + sink = coresight_get_enabled_sink(false); 516 + if (!sink) { 517 + ret = -EINVAL; 518 + goto out; 519 + } 520 + 521 + path = coresight_build_path(csdev, sink); 564 522 if (IS_ERR(path)) { 565 523 pr_err("building path(s) failed\n"); 566 524 ret = PTR_ERR(path);
+19 -9
drivers/hwtracing/intel_th/core.c
··· 29 29 #include "intel_th.h" 30 30 #include "debug.h" 31 31 32 + static bool host_mode __read_mostly; 33 + module_param(host_mode, bool, 0444); 34 + 32 35 static DEFINE_IDA(intel_th_ida); 33 36 34 37 static int intel_th_match(struct device *dev, struct device_driver *driver) ··· 383 380 /* 384 381 * Intel(R) Trace Hub subdevices 385 382 */ 386 - static struct intel_th_subdevice { 383 + static const struct intel_th_subdevice { 387 384 const char *name; 388 385 struct resource res[3]; 389 386 unsigned nres; ··· 530 527 { 531 528 struct resource res[3]; 532 529 unsigned int req = 0; 533 - int i, err; 530 + int src, dst, err; 534 531 535 532 /* create devices for each intel_th_subdevice */ 536 - for (i = 0; i < ARRAY_SIZE(intel_th_subdevices); i++) { 537 - struct intel_th_subdevice *subdev = &intel_th_subdevices[i]; 533 + for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) { 534 + const struct intel_th_subdevice *subdev = 535 + &intel_th_subdevices[src]; 538 536 struct intel_th_device *thdev; 539 537 int r; 538 + 539 + /* only allow SOURCE and SWITCH devices in host mode */ 540 + if (host_mode && subdev->type == INTEL_TH_OUTPUT) 541 + continue; 540 542 541 543 thdev = intel_th_device_alloc(th, subdev->type, subdev->name, 542 544 subdev->id); ··· 585 577 } 586 578 587 579 if (subdev->type == INTEL_TH_OUTPUT) { 588 - thdev->dev.devt = MKDEV(th->major, i); 580 + thdev->dev.devt = MKDEV(th->major, dst); 589 581 thdev->output.type = subdev->otype; 590 582 thdev->output.port = -1; 591 583 thdev->output.scratchpad = subdev->scrpd; 584 + } else if (subdev->type == INTEL_TH_SWITCH) { 585 + thdev->host_mode = host_mode; 592 586 } 593 587 594 588 err = device_add(&thdev->dev); ··· 607 597 req++; 608 598 } 609 599 610 - th->thdev[i] = thdev; 600 + th->thdev[dst++] = thdev; 611 601 } 612 602 613 603 return 0; 614 604 615 605 kill_subdevs: 616 - for (i-- ; i >= 0; i--) 617 - intel_th_device_remove(th->thdev[i]); 606 + for (; dst >= 0; dst--) 607 + 
intel_th_device_remove(th->thdev[dst]); 618 608 619 609 return err; 620 610 } ··· 727 717 728 718 intel_th_request_hub_module_flush(th); 729 719 for (i = 0; i < TH_SUBDEVICE_MAX; i++) 730 - if (th->thdev[i] != th->hub) 720 + if (th->thdev[i] && th->thdev[i] != th->hub) 731 721 intel_th_device_remove(th->thdev[i]); 732 722 733 723 intel_th_device_remove(th->hub);
+24 -2
drivers/hwtracing/intel_th/gth.c
··· 564 564 struct gth_device *gth = dev_get_drvdata(&thdev->dev); 565 565 int i, id; 566 566 567 + if (thdev->host_mode) 568 + return -EBUSY; 569 + 567 570 if (othdev->type != INTEL_TH_OUTPUT) 568 571 return -EINVAL; 569 572 ··· 602 599 { 603 600 struct gth_device *gth = dev_get_drvdata(&thdev->dev); 604 601 int port = othdev->output.port; 602 + 603 + if (thdev->host_mode) 604 + return; 605 605 606 606 spin_lock(&gth->gth_lock); 607 607 othdev->output.port = -1; ··· 660 654 gth->base = base; 661 655 spin_lock_init(&gth->gth_lock); 662 656 657 + /* 658 + * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE 659 + * bit. Either way, don't reset HW in this case, and don't export any 660 + * capture configuration attributes. Also, refuse to assign output 661 + * drivers to ports, see intel_th_gth_assign(). 662 + */ 663 + if (thdev->host_mode) 664 + goto done; 665 + 663 666 ret = intel_th_gth_reset(gth); 664 - if (ret) 665 - return ret; 667 + if (ret) { 668 + if (ret != -EBUSY) 669 + return ret; 670 + 671 + thdev->host_mode = true; 672 + 673 + goto done; 674 + } 666 675 667 676 for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) 668 677 gth->master[i] = -1; ··· 698 677 return -ENOMEM; 699 678 } 700 679 680 + done: 701 681 dev_set_drvdata(dev, gth); 702 682 703 683 return 0;
+4
drivers/hwtracing/intel_th/intel_th.h
··· 54 54 * @num_resources: number of resources in @resource array 55 55 * @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH} 56 56 * @id: device instance or -1 57 + * @host_mode: Intel TH is controlled by an external debug host 57 58 * @output: output descriptor for INTEL_TH_OUTPUT devices 58 59 * @name: device name to match the driver 59 60 */ ··· 64 63 unsigned int num_resources; 65 64 unsigned int type; 66 65 int id; 66 + 67 + /* INTEL_TH_SWITCH specific */ 68 + bool host_mode; 67 69 68 70 /* INTEL_TH_OUTPUT specific */ 69 71 struct intel_th_output output;
+5 -3
drivers/hwtracing/stm/core.c
··· 361 361 struct stm_file *stmf; 362 362 struct device *dev; 363 363 unsigned int major = imajor(inode); 364 - int err = -ENODEV; 364 + int err = -ENOMEM; 365 365 366 366 dev = class_find_device(&stm_class, NULL, &major, major_match); 367 367 if (!dev) ··· 369 369 370 370 stmf = kzalloc(sizeof(*stmf), GFP_KERNEL); 371 371 if (!stmf) 372 - return -ENOMEM; 372 + goto err_put_device; 373 373 374 + err = -ENODEV; 374 375 stm_output_init(&stmf->output); 375 376 stmf->stm = to_stm_device(dev); 376 377 ··· 383 382 return nonseekable_open(inode, file); 384 383 385 384 err_free: 385 + kfree(stmf); 386 + err_put_device: 386 387 /* matches class_find_device() above */ 387 388 put_device(dev); 388 - kfree(stmf); 389 389 390 390 return err; 391 391 }
+2 -8
drivers/lightnvm/core.c
··· 22 22 #include <linux/types.h> 23 23 #include <linux/sem.h> 24 24 #include <linux/bitmap.h> 25 - #include <linux/module.h> 25 + #include <linux/moduleparam.h> 26 26 #include <linux/miscdevice.h> 27 27 #include <linux/lightnvm.h> 28 28 #include <linux/sched/sysctl.h> ··· 1129 1129 .nodename = "lightnvm/control", 1130 1130 .fops = &_ctl_fops, 1131 1131 }; 1132 - module_misc_device(_nvm_misc); 1133 - 1134 - MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR); 1135 - 1136 - MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>"); 1137 - MODULE_LICENSE("GPL v2"); 1138 - MODULE_VERSION("0.1"); 1132 + builtin_misc_device(_nvm_misc);
+1 -1
drivers/mcb/mcb-parse.c
··· 149 149 reg = readl(*base); 150 150 151 151 bar_count = BAR_CNT(reg); 152 - if (bar_count <= 0 && bar_count > CHAMELEON_BAR_MAX) 152 + if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX) 153 153 return -ENODEV; 154 154 155 155 c = kcalloc(bar_count, sizeof(struct chameleon_bar),
-1
drivers/misc/genwqe/card_base.h
··· 41 41 #include "genwqe_driver.h" 42 42 43 43 #define GENWQE_MSI_IRQS 4 /* Just one supported, no MSIx */ 44 - #define GENWQE_FLAG_MSI_ENABLED (1 << 0) 45 44 46 45 #define GENWQE_MAX_VFS 15 /* maximum 15 VFs are possible */ 47 46 #define GENWQE_MAX_FUNCS 16 /* 1 PF and 15 VFs */
+2 -10
drivers/misc/genwqe/card_utils.c
··· 740 740 int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) 741 741 { 742 742 int rc; 743 - struct pci_dev *pci_dev = cd->pci_dev; 744 743 745 - rc = pci_enable_msi_range(pci_dev, 1, count); 744 + rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI); 746 745 if (rc < 0) 747 746 return rc; 748 - 749 - cd->flags |= GENWQE_FLAG_MSI_ENABLED; 750 747 return 0; 751 748 } 752 749 ··· 753 756 */ 754 757 void genwqe_reset_interrupt_capability(struct genwqe_dev *cd) 755 758 { 756 - struct pci_dev *pci_dev = cd->pci_dev; 757 - 758 - if (cd->flags & GENWQE_FLAG_MSI_ENABLED) { 759 - pci_disable_msi(pci_dev); 760 - cd->flags &= ~GENWQE_FLAG_MSI_ENABLED; 761 - } 759 + pci_free_irq_vectors(cd->pci_dev); 762 760 } 763 761 764 762 /**
+2 -1
drivers/misc/lkdtm_bugs.c
··· 85 85 /* Use default char array length that triggers stack protection. */ 86 86 char data[8]; 87 87 88 - memset((void *)data, 0, 64); 88 + memset((void *)data, 'a', 64); 89 + pr_info("Corrupted stack with '%16s'...\n", data); 89 90 } 90 91 91 92 void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+5 -2
drivers/misc/lkdtm_perms.c
··· 60 60 61 61 static void execute_user_location(void *dst) 62 62 { 63 + int copied; 64 + 63 65 /* Intentionally crossing kernel/user memory boundary. */ 64 66 void (*func)(void) = dst; 65 67 66 68 pr_info("attempting ok execution at %p\n", do_nothing); 67 69 do_nothing(); 68 70 69 - if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE)) 71 + copied = access_process_vm(current, (unsigned long)dst, do_nothing, 72 + EXEC_SIZE, FOLL_WRITE); 73 + if (copied < EXEC_SIZE) 70 74 return; 71 - flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); 72 75 pr_info("attempting bad execution at %p\n", func); 73 76 func(); 74 77 }
+1 -1
drivers/misc/mei/amthif.c
··· 144 144 dev->iamthif_state = MEI_IAMTHIF_WRITING; 145 145 cl->fp = cb->fp; 146 146 147 - ret = mei_cl_write(cl, cb, false); 147 + ret = mei_cl_write(cl, cb); 148 148 if (ret < 0) 149 149 return ret; 150 150
+98 -2
drivers/misc/mei/bus-fixup.c
··· 38 38 #define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \ 39 39 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB) 40 40 41 + #define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \ 42 + 0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb) 43 + 41 44 #define MEI_UUID_ANY NULL_UUID_LE 42 45 43 46 /** ··· 70 67 dev_dbg(&cldev->dev, "running hook %s\n", __func__); 71 68 72 69 cldev->do_match = 0; 70 + } 71 + 72 + #define OSTYPE_LINUX 2 73 + struct mei_os_ver { 74 + __le16 build; 75 + __le16 reserved1; 76 + u8 os_type; 77 + u8 major; 78 + u8 minor; 79 + u8 reserved2; 80 + } __packed; 81 + 82 + #define MKHI_FEATURE_PTT 0x10 83 + 84 + struct mkhi_rule_id { 85 + __le16 rule_type; 86 + u8 feature_id; 87 + u8 reserved; 88 + } __packed; 89 + 90 + struct mkhi_fwcaps { 91 + struct mkhi_rule_id id; 92 + u8 len; 93 + u8 data[0]; 94 + } __packed; 95 + 96 + #define MKHI_FWCAPS_GROUP_ID 0x3 97 + #define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 98 + struct mkhi_msg_hdr { 99 + u8 group_id; 100 + u8 command; 101 + u8 reserved; 102 + u8 result; 103 + } __packed; 104 + 105 + struct mkhi_msg { 106 + struct mkhi_msg_hdr hdr; 107 + u8 data[0]; 108 + } __packed; 109 + 110 + static int mei_osver(struct mei_cl_device *cldev) 111 + { 112 + int ret; 113 + const size_t size = sizeof(struct mkhi_msg_hdr) + 114 + sizeof(struct mkhi_fwcaps) + 115 + sizeof(struct mei_os_ver); 116 + size_t length = 8; 117 + char buf[size]; 118 + struct mkhi_msg *req; 119 + struct mkhi_fwcaps *fwcaps; 120 + struct mei_os_ver *os_ver; 121 + unsigned int mode = MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL; 122 + 123 + memset(buf, 0, size); 124 + 125 + req = (struct mkhi_msg *)buf; 126 + req->hdr.group_id = MKHI_FWCAPS_GROUP_ID; 127 + req->hdr.command = MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD; 128 + 129 + fwcaps = (struct mkhi_fwcaps *)req->data; 130 + 131 + fwcaps->id.rule_type = 0x0; 132 + fwcaps->id.feature_id = MKHI_FEATURE_PTT; 133 + fwcaps->len = sizeof(*os_ver); 134 + os_ver = (struct mei_os_ver 
*)fwcaps->data; 135 + os_ver->os_type = OSTYPE_LINUX; 136 + 137 + ret = __mei_cl_send(cldev->cl, buf, size, mode); 138 + if (ret < 0) 139 + return ret; 140 + 141 + ret = __mei_cl_recv(cldev->cl, buf, length, 0); 142 + if (ret < 0) 143 + return ret; 144 + 145 + return 0; 146 + } 147 + 148 + static void mei_mkhi_fix(struct mei_cl_device *cldev) 149 + { 150 + int ret; 151 + 152 + ret = mei_cldev_enable(cldev); 153 + if (ret) 154 + return; 155 + 156 + ret = mei_osver(cldev); 157 + if (ret) 158 + dev_err(&cldev->dev, "OS version command failed %d\n", ret); 159 + 160 + mei_cldev_disable(cldev); 73 161 } 74 162 75 163 /** ··· 256 162 257 163 WARN_ON(mutex_is_locked(&bus->device_lock)); 258 164 259 - ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1); 165 + ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 166 + MEI_CL_IO_TX_BLOCKING); 260 167 if (ret < 0) { 261 168 dev_err(bus->dev, "Could not send IF version cmd\n"); 262 169 return ret; ··· 272 177 return -ENOMEM; 273 178 274 179 ret = 0; 275 - bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); 180 + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0); 276 181 if (bytes_recv < if_version_length) { 277 182 dev_err(bus->dev, "Could not read IF version\n"); 278 183 ret = -EIO; ··· 404 309 MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist), 405 310 MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), 406 311 MEI_FIXUP(MEI_UUID_WD, mei_wd), 312 + MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), 407 313 }; 408 314 409 315 /**
+134 -85
drivers/misc/mei/bus.c
··· 36 36 * @cl: host client 37 37 * @buf: buffer to send 38 38 * @length: buffer length 39 - * @blocking: wait for write completion 39 + * @mode: sending mode 40 40 * 41 41 * Return: written size bytes or < 0 on error 42 42 */ 43 43 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, 44 - bool blocking) 44 + unsigned int mode) 45 45 { 46 46 struct mei_device *bus; 47 47 struct mei_cl_cb *cb; ··· 80 80 goto out; 81 81 } 82 82 83 + cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL); 84 + cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING); 83 85 memcpy(cb->buf.data, buf, length); 84 86 85 - rets = mei_cl_write(cl, cb, blocking); 87 + rets = mei_cl_write(cl, cb); 86 88 87 89 out: 88 90 mutex_unlock(&bus->device_lock); ··· 98 96 * @cl: host client 99 97 * @buf: buffer to receive 100 98 * @length: buffer length 99 + * @mode: io mode 101 100 * 102 101 * Return: read size in bytes of < 0 on error 103 102 */ 104 - ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) 103 + ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, 104 + unsigned int mode) 105 105 { 106 106 struct mei_device *bus; 107 107 struct mei_cl_cb *cb; 108 108 size_t r_length; 109 109 ssize_t rets; 110 + bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK); 110 111 111 112 if (WARN_ON(!cl || !cl->dev)) 112 113 return -ENODEV; ··· 129 124 rets = mei_cl_read_start(cl, length, NULL); 130 125 if (rets && rets != -EBUSY) 131 126 goto out; 127 + 128 + if (nonblock) { 129 + rets = -EAGAIN; 130 + goto out; 131 + } 132 132 133 133 /* wait on event only if there is no other waiter */ 134 134 /* synchronized under device mutex */ ··· 195 185 { 196 186 struct mei_cl *cl = cldev->cl; 197 187 198 - if (cl == NULL) 199 - return -ENODEV; 200 - 201 - return __mei_cl_send(cl, buf, length, 1); 188 + return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING); 202 189 } 203 190 EXPORT_SYMBOL_GPL(mei_cldev_send); 191 + 192 + /** 193 + * mei_cldev_recv_nonblock - non block client receive (read) 194 + 
* 195 + * @cldev: me client device 196 + * @buf: buffer to receive 197 + * @length: buffer length 198 + * 199 + * Return: read size in bytes of < 0 on error 200 + * -EAGAIN if function will block. 201 + */ 202 + ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, 203 + size_t length) 204 + { 205 + struct mei_cl *cl = cldev->cl; 206 + 207 + return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK); 208 + } 209 + EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); 204 210 205 211 /** 206 212 * mei_cldev_recv - client receive (read) ··· 231 205 { 232 206 struct mei_cl *cl = cldev->cl; 233 207 234 - if (cl == NULL) 235 - return -ENODEV; 236 - 237 - return __mei_cl_recv(cl, buf, length); 208 + return __mei_cl_recv(cl, buf, length, 0); 238 209 } 239 210 EXPORT_SYMBOL_GPL(mei_cldev_recv); 240 211 241 212 /** 242 - * mei_cl_bus_event_work - dispatch rx event for a bus device 243 - * and schedule new work 213 + * mei_cl_bus_rx_work - dispatch rx event for a bus device 244 214 * 245 215 * @work: work 246 216 */ 247 - static void mei_cl_bus_event_work(struct work_struct *work) 217 + static void mei_cl_bus_rx_work(struct work_struct *work) 248 218 { 249 219 struct mei_cl_device *cldev; 250 220 struct mei_device *bus; 251 221 252 - cldev = container_of(work, struct mei_cl_device, event_work); 222 + cldev = container_of(work, struct mei_cl_device, rx_work); 253 223 254 224 bus = cldev->bus; 255 225 256 - if (cldev->event_cb) 257 - cldev->event_cb(cldev, cldev->events, cldev->event_context); 226 + if (cldev->rx_cb) 227 + cldev->rx_cb(cldev); 258 228 259 - cldev->events = 0; 229 + mutex_lock(&bus->device_lock); 230 + mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 231 + mutex_unlock(&bus->device_lock); 232 + } 260 233 261 - /* Prepare for the next read */ 262 - if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { 263 - mutex_lock(&bus->device_lock); 264 - mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 265 - mutex_unlock(&bus->device_lock); 266 - } 
234 + /** 235 + * mei_cl_bus_notif_work - dispatch FW notif event for a bus device 236 + * 237 + * @work: work 238 + */ 239 + static void mei_cl_bus_notif_work(struct work_struct *work) 240 + { 241 + struct mei_cl_device *cldev; 242 + 243 + cldev = container_of(work, struct mei_cl_device, notif_work); 244 + 245 + if (cldev->notif_cb) 246 + cldev->notif_cb(cldev); 267 247 } 268 248 269 249 /** ··· 284 252 { 285 253 struct mei_cl_device *cldev = cl->cldev; 286 254 287 - if (!cldev || !cldev->event_cb) 288 - return false; 289 - 290 - if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF))) 255 + if (!cldev || !cldev->notif_cb) 291 256 return false; 292 257 293 258 if (!cl->notify_ev) 294 259 return false; 295 260 296 - set_bit(MEI_CL_EVENT_NOTIF, &cldev->events); 297 - 298 - schedule_work(&cldev->event_work); 261 + schedule_work(&cldev->notif_work); 299 262 300 263 cl->notify_ev = false; 301 264 ··· 298 271 } 299 272 300 273 /** 301 - * mei_cl_bus_rx_event - schedule rx event 274 + * mei_cl_bus_rx_event - schedule rx event 302 275 * 303 276 * @cl: host client 304 277 * ··· 309 282 { 310 283 struct mei_cl_device *cldev = cl->cldev; 311 284 312 - if (!cldev || !cldev->event_cb) 285 + if (!cldev || !cldev->rx_cb) 313 286 return false; 314 287 315 - if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX))) 316 - return false; 317 - 318 - set_bit(MEI_CL_EVENT_RX, &cldev->events); 319 - 320 - schedule_work(&cldev->event_work); 288 + schedule_work(&cldev->rx_work); 321 289 322 290 return true; 323 291 } 324 292 325 293 /** 326 - * mei_cldev_register_event_cb - register event callback 294 + * mei_cldev_register_rx_cb - register Rx event callback 327 295 * 328 296 * @cldev: me client devices 329 - * @event_cb: callback function 330 - * @events_mask: requested events bitmask 331 - * @context: driver context data 297 + * @rx_cb: callback function 332 298 * 333 299 * Return: 0 on success 334 300 * -EALREADY if an callback is already registered 335 301 * <0 on other errors 336 302 */ 337 - 
int mei_cldev_register_event_cb(struct mei_cl_device *cldev, 338 - unsigned long events_mask, 339 - mei_cldev_event_cb_t event_cb, void *context) 303 + int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb) 340 304 { 341 305 struct mei_device *bus = cldev->bus; 342 306 int ret; 343 307 344 - if (cldev->event_cb) 308 + if (!rx_cb) 309 + return -EINVAL; 310 + if (cldev->rx_cb) 345 311 return -EALREADY; 346 312 347 - cldev->events = 0; 348 - cldev->events_mask = events_mask; 349 - cldev->event_cb = event_cb; 350 - cldev->event_context = context; 351 - INIT_WORK(&cldev->event_work, mei_cl_bus_event_work); 313 + cldev->rx_cb = rx_cb; 314 + INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work); 352 315 353 - if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { 354 - mutex_lock(&bus->device_lock); 355 - ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 356 - mutex_unlock(&bus->device_lock); 357 - if (ret && ret != -EBUSY) 358 - return ret; 359 - } 360 - 361 - if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) { 362 - mutex_lock(&bus->device_lock); 363 - ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 
1 : 0); 364 - mutex_unlock(&bus->device_lock); 365 - if (ret) 366 - return ret; 367 - } 316 + mutex_lock(&bus->device_lock); 317 + ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 318 + mutex_unlock(&bus->device_lock); 319 + if (ret && ret != -EBUSY) 320 + return ret; 368 321 369 322 return 0; 370 323 } 371 - EXPORT_SYMBOL_GPL(mei_cldev_register_event_cb); 324 + EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb); 325 + 326 + /** 327 + * mei_cldev_register_notif_cb - register FW notification event callback 328 + * 329 + * @cldev: me client devices 330 + * @notif_cb: callback function 331 + * 332 + * Return: 0 on success 333 + * -EALREADY if an callback is already registered 334 + * <0 on other errors 335 + */ 336 + int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, 337 + mei_cldev_cb_t notif_cb) 338 + { 339 + struct mei_device *bus = cldev->bus; 340 + int ret; 341 + 342 + if (!notif_cb) 343 + return -EINVAL; 344 + 345 + if (cldev->notif_cb) 346 + return -EALREADY; 347 + 348 + cldev->notif_cb = notif_cb; 349 + INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work); 350 + 351 + mutex_lock(&bus->device_lock); 352 + ret = mei_cl_notify_request(cldev->cl, NULL, 1); 353 + mutex_unlock(&bus->device_lock); 354 + if (ret) 355 + return ret; 356 + 357 + return 0; 358 + } 359 + EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb); 372 360 373 361 /** 374 362 * mei_cldev_get_drvdata - driver data getter ··· 445 403 */ 446 404 bool mei_cldev_enabled(struct mei_cl_device *cldev) 447 405 { 448 - return cldev->cl && mei_cl_is_connected(cldev->cl); 406 + return mei_cl_is_connected(cldev->cl); 449 407 } 450 408 EXPORT_SYMBOL_GPL(mei_cldev_enabled); 451 409 ··· 465 423 466 424 cl = cldev->cl; 467 425 468 - if (!cl) { 426 + if (cl->state == MEI_FILE_UNINITIALIZED) { 469 427 mutex_lock(&bus->device_lock); 470 - cl = mei_cl_alloc_linked(bus); 428 + ret = mei_cl_link(cl); 471 429 mutex_unlock(&bus->device_lock); 472 - if (IS_ERR(cl)) 473 - return PTR_ERR(cl); 430 + if (ret) 431 
+ return ret; 474 432 /* update pointers */ 475 - cldev->cl = cl; 476 433 cl->cldev = cldev; 477 434 } 478 435 ··· 512 471 struct mei_cl *cl; 513 472 int err; 514 473 515 - if (!cldev || !cldev->cl) 474 + if (!cldev) 516 475 return -ENODEV; 517 476 518 477 cl = cldev->cl; 519 478 520 479 bus = cldev->bus; 521 480 522 - cldev->event_cb = NULL; 523 - 524 481 mutex_lock(&bus->device_lock); 525 482 526 483 if (!mei_cl_is_connected(cl)) { 527 - dev_err(bus->dev, "Already disconnected"); 484 + dev_dbg(bus->dev, "Already disconnected"); 528 485 err = 0; 529 486 goto out; 530 487 } ··· 535 496 /* Flush queues and remove any pending read */ 536 497 mei_cl_flush_queues(cl, NULL); 537 498 mei_cl_unlink(cl); 538 - 539 - kfree(cl); 540 - cldev->cl = NULL; 541 499 542 500 mutex_unlock(&bus->device_lock); 543 501 return err; ··· 665 629 if (!cldev || !dev->driver) 666 630 return 0; 667 631 668 - if (cldev->event_cb) { 669 - cldev->event_cb = NULL; 670 - cancel_work_sync(&cldev->event_work); 632 + if (cldev->rx_cb) { 633 + cancel_work_sync(&cldev->rx_work); 634 + cldev->rx_cb = NULL; 635 + } 636 + if (cldev->notif_cb) { 637 + cancel_work_sync(&cldev->notif_work); 638 + cldev->notif_cb = NULL; 671 639 } 672 640 673 641 cldrv = to_mei_cl_driver(dev->driver); ··· 794 754 795 755 mei_me_cl_put(cldev->me_cl); 796 756 mei_dev_bus_put(cldev->bus); 757 + kfree(cldev->cl); 797 758 kfree(cldev); 798 759 } 799 760 ··· 827 786 struct mei_me_client *me_cl) 828 787 { 829 788 struct mei_cl_device *cldev; 789 + struct mei_cl *cl; 830 790 831 791 cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL); 832 792 if (!cldev) 833 793 return NULL; 794 + 795 + cl = mei_cl_allocate(bus); 796 + if (!cl) { 797 + kfree(cldev); 798 + return NULL; 799 + } 834 800 835 801 device_initialize(&cldev->dev); 836 802 cldev->dev.parent = bus->dev; ··· 845 797 cldev->dev.type = &mei_cl_device_type; 846 798 cldev->bus = mei_dev_bus_get(bus); 847 799 cldev->me_cl = mei_me_cl_get(me_cl); 800 + cldev->cl = cl; 848 801 
mei_cl_bus_set_name(cldev); 849 802 cldev->is_added = 0; 850 803 INIT_LIST_HEAD(&cldev->bus_list);
+14 -9
drivers/misc/mei/client.c
··· 425 425 * 426 426 * @cl: host client 427 427 * @length: size of the buffer 428 - * @type: operation type 428 + * @fop_type: operation type 429 429 * @fp: associated file pointer (might be NULL) 430 430 * 431 431 * Return: cb on success and NULL on failure ··· 459 459 * 460 460 * @cl: host client 461 461 * @length: size of the buffer 462 - * @type: operation type 462 + * @fop_type: operation type 463 463 * @fp: associated file pointer (might be NULL) 464 464 * 465 465 * Return: cb on success and NULL on failure ··· 571 571 INIT_LIST_HEAD(&cl->rd_pending); 572 572 INIT_LIST_HEAD(&cl->link); 573 573 cl->writing_state = MEI_IDLE; 574 - cl->state = MEI_FILE_INITIALIZING; 574 + cl->state = MEI_FILE_UNINITIALIZED; 575 575 cl->dev = dev; 576 576 } 577 577 ··· 672 672 673 673 list_del_init(&cl->link); 674 674 675 - cl->state = MEI_FILE_INITIALIZING; 675 + cl->state = MEI_FILE_UNINITIALIZED; 676 + cl->writing_state = MEI_IDLE; 677 + 678 + WARN_ON(!list_empty(&cl->rd_completed) || 679 + !list_empty(&cl->rd_pending) || 680 + !list_empty(&cl->link)); 676 681 677 682 return 0; 678 683 } ··· 691 686 692 687 pm_runtime_mark_last_busy(dev->dev); 693 688 dev_dbg(dev->dev, "rpm: autosuspend\n"); 694 - pm_runtime_autosuspend(dev->dev); 689 + pm_request_autosuspend(dev->dev); 695 690 } 696 691 697 692 /** ··· 761 756 struct mei_device *dev = cl->dev; 762 757 763 758 if (cl->state == MEI_FILE_DISCONNECTED || 764 - cl->state == MEI_FILE_INITIALIZING) 759 + cl->state <= MEI_FILE_INITIALIZING) 765 760 return; 766 761 767 762 cl->state = MEI_FILE_DISCONNECTED; ··· 1603 1598 * 1604 1599 * @cl: host client 1605 1600 * @cb: write callback with filled data 1606 - * @blocking: block until completed 1607 1601 * 1608 1602 * Return: number of bytes sent on success, <0 on failure. 
1609 1603 */ 1610 - int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) 1604 + int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) 1611 1605 { 1612 1606 struct mei_device *dev; 1613 1607 struct mei_msg_data *buf; 1614 1608 struct mei_msg_hdr mei_hdr; 1615 1609 int size; 1616 1610 int rets; 1617 - 1611 + bool blocking; 1618 1612 1619 1613 if (WARN_ON(!cl || !cl->dev)) 1620 1614 return -ENODEV; ··· 1625 1621 1626 1622 buf = &cb->buf; 1627 1623 size = buf->size; 1624 + blocking = cb->blocking; 1628 1625 1629 1626 cl_dbg(dev, cl, "size=%d\n", size); 1630 1627
+1 -1
drivers/misc/mei/client.h
··· 219 219 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); 220 220 int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr, 221 221 struct mei_cl_cb *cmpl_list); 222 - int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); 222 + int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); 223 223 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, 224 224 struct mei_cl_cb *cmpl_list); 225 225
+2
drivers/misc/mei/hw-me-regs.h
··· 122 122 #define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */ 123 123 #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ 124 124 125 + #define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */ 126 + 125 127 #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ 126 128 #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ 127 129
+68 -22
drivers/misc/mei/hw-me.c
··· 246 246 return hw->pg_state; 247 247 } 248 248 249 + static inline u32 me_intr_src(u32 hcsr) 250 + { 251 + return hcsr & H_CSR_IS_MASK; 252 + } 253 + 254 + /** 255 + * me_intr_disable - disables mei device interrupts 256 + * using supplied hcsr register value. 257 + * 258 + * @dev: the device structure 259 + * @hcsr: supplied hcsr register value 260 + */ 261 + static inline void me_intr_disable(struct mei_device *dev, u32 hcsr) 262 + { 263 + hcsr &= ~H_CSR_IE_MASK; 264 + mei_hcsr_set(dev, hcsr); 265 + } 266 + 267 + /** 268 + * mei_me_intr_clear - clear and stop interrupts 269 + * 270 + * @dev: the device structure 271 + * @hcsr: supplied hcsr register value 272 + */ 273 + static inline void me_intr_clear(struct mei_device *dev, u32 hcsr) 274 + { 275 + if (me_intr_src(hcsr)) 276 + mei_hcsr_write(dev, hcsr); 277 + } 278 + 249 279 /** 250 280 * mei_me_intr_clear - clear and stop interrupts 251 281 * ··· 285 255 { 286 256 u32 hcsr = mei_hcsr_read(dev); 287 257 288 - if (hcsr & H_CSR_IS_MASK) 289 - mei_hcsr_write(dev, hcsr); 258 + me_intr_clear(dev, hcsr); 290 259 } 291 260 /** 292 261 * mei_me_intr_enable - enables mei device interrupts ··· 309 280 { 310 281 u32 hcsr = mei_hcsr_read(dev); 311 282 312 - hcsr &= ~H_CSR_IE_MASK; 313 - mei_hcsr_set(dev, hcsr); 283 + me_intr_disable(dev, hcsr); 284 + } 285 + 286 + /** 287 + * mei_me_synchronize_irq - wait for pending IRQ handlers 288 + * 289 + * @dev: the device structure 290 + */ 291 + static void mei_me_synchronize_irq(struct mei_device *dev) 292 + { 293 + struct pci_dev *pdev = to_pci_dev(dev->dev); 294 + 295 + synchronize_irq(pdev->irq); 314 296 } 315 297 316 298 /** ··· 490 450 491 451 492 452 /** 493 - * mei_me_write_message - writes a message to mei device. 453 + * mei_me_hbuf_write - writes a message to host hw buffer. 
494 454 * 495 455 * @dev: the device structure 496 456 * @header: mei HECI header of message ··· 498 458 * 499 459 * Return: -EIO if write has failed 500 460 */ 501 - static int mei_me_write_message(struct mei_device *dev, 502 - struct mei_msg_hdr *header, 503 - unsigned char *buf) 461 + static int mei_me_hbuf_write(struct mei_device *dev, 462 + struct mei_msg_hdr *header, 463 + const unsigned char *buf) 504 464 { 505 465 unsigned long rem; 506 466 unsigned long length = header->length; ··· 996 956 * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler 997 957 * 998 958 * @dev: the device structure 959 + * @intr_source: interrupt source 999 960 */ 1000 - static void mei_me_d0i3_intr(struct mei_device *dev) 961 + static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source) 1001 962 { 1002 963 struct mei_me_hw *hw = to_me_hw(dev); 1003 964 1004 965 if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT && 1005 - (hw->intr_source & H_D0I3C_IS)) { 966 + (intr_source & H_D0I3C_IS)) { 1006 967 dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED; 1007 968 if (hw->pg_state == MEI_PG_ON) { 1008 969 hw->pg_state = MEI_PG_OFF; ··· 1022 981 wake_up(&dev->wait_pg); 1023 982 } 1024 983 1025 - if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) { 984 + if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) { 1026 985 /* 1027 986 * HW sent some data and we are in D0i3, so 1028 987 * we got here because of HW initiated exit from D0i3. 
··· 1037 996 * mei_me_pg_intr - perform pg processing in interrupt thread handler 1038 997 * 1039 998 * @dev: the device structure 999 + * @intr_source: interrupt source 1040 1000 */ 1041 - static void mei_me_pg_intr(struct mei_device *dev) 1001 + static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source) 1042 1002 { 1043 1003 struct mei_me_hw *hw = to_me_hw(dev); 1044 1004 1045 1005 if (hw->d0i3_supported) 1046 - mei_me_d0i3_intr(dev); 1006 + mei_me_d0i3_intr(dev, intr_source); 1047 1007 else 1048 1008 mei_me_pg_legacy_intr(dev); 1049 1009 } ··· 1163 1121 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id) 1164 1122 { 1165 1123 struct mei_device *dev = (struct mei_device *)dev_id; 1166 - struct mei_me_hw *hw = to_me_hw(dev); 1167 1124 u32 hcsr; 1168 1125 1169 1126 hcsr = mei_hcsr_read(dev); 1170 - if (!(hcsr & H_CSR_IS_MASK)) 1127 + if (!me_intr_src(hcsr)) 1171 1128 return IRQ_NONE; 1172 1129 1173 - hw->intr_source = hcsr & H_CSR_IS_MASK; 1174 - dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source); 1130 + dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr)); 1175 1131 1176 - /* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */ 1177 - mei_hcsr_write(dev, hcsr); 1178 - 1132 + /* disable interrupts on device */ 1133 + me_intr_disable(dev, hcsr); 1179 1134 return IRQ_WAKE_THREAD; 1180 1135 } 1181 1136 ··· 1191 1152 struct mei_device *dev = (struct mei_device *) dev_id; 1192 1153 struct mei_cl_cb complete_list; 1193 1154 s32 slots; 1155 + u32 hcsr; 1194 1156 int rets = 0; 1195 1157 1196 1158 dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n"); 1197 1159 /* initialize our complete list */ 1198 1160 mutex_lock(&dev->device_lock); 1161 + 1162 + hcsr = mei_hcsr_read(dev); 1163 + me_intr_clear(dev, hcsr); 1164 + 1199 1165 mei_io_list_init(&complete_list); 1200 1166 1201 1167 /* check if ME wants a reset */ ··· 1210 1166 goto end; 1211 1167 } 1212 1168 1213 - mei_me_pg_intr(dev); 1169 
+ mei_me_pg_intr(dev, me_intr_src(hcsr)); 1214 1170 1215 1171 /* check if we need to start the dev */ 1216 1172 if (!mei_host_is_ready(dev)) { ··· 1260 1216 1261 1217 end: 1262 1218 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets); 1219 + mei_me_intr_enable(dev); 1263 1220 mutex_unlock(&dev->device_lock); 1264 1221 return IRQ_HANDLED; 1265 1222 } ··· 1283 1238 .intr_clear = mei_me_intr_clear, 1284 1239 .intr_enable = mei_me_intr_enable, 1285 1240 .intr_disable = mei_me_intr_disable, 1241 + .synchronize_irq = mei_me_synchronize_irq, 1286 1242 1287 1243 .hbuf_free_slots = mei_me_hbuf_empty_slots, 1288 1244 .hbuf_is_ready = mei_me_hbuf_is_empty, 1289 1245 .hbuf_max_len = mei_me_hbuf_max_len, 1290 1246 1291 - .write = mei_me_write_message, 1247 + .write = mei_me_hbuf_write, 1292 1248 1293 1249 .rdbuf_full_slots = mei_me_count_full_read_slots, 1294 1250 .read_hdr = mei_me_mecbrw_read,
-2
drivers/misc/mei/hw-me.h
··· 51 51 * 52 52 * @cfg: per device generation config and ops 53 53 * @mem_addr: io memory address 54 - * @intr_source: interrupt source 55 54 * @pg_state: power gating state 56 55 * @d0i3_supported: di03 support 57 56 */ 58 57 struct mei_me_hw { 59 58 const struct mei_cfg *cfg; 60 59 void __iomem *mem_addr; 61 - u32 intr_source; 62 60 enum mei_pg_state pg_state; 63 61 bool d0i3_supported; 64 62 };
+16 -2
drivers/misc/mei/hw-txe.c
··· 19 19 #include <linux/ktime.h> 20 20 #include <linux/delay.h> 21 21 #include <linux/kthread.h> 22 - #include <linux/irqreturn.h> 22 + #include <linux/interrupt.h> 23 23 #include <linux/pm_runtime.h> 24 24 25 25 #include <linux/mei.h> ··· 441 441 } 442 442 443 443 /** 444 + * mei_txe_synchronize_irq - wait for pending IRQ handlers 445 + * 446 + * @dev: the device structure 447 + */ 448 + static void mei_txe_synchronize_irq(struct mei_device *dev) 449 + { 450 + struct pci_dev *pdev = to_pci_dev(dev->dev); 451 + 452 + synchronize_irq(pdev->irq); 453 + } 454 + 455 + /** 444 456 * mei_txe_pending_interrupts - check if there are pending interrupts 445 457 * only Aliveness, Input ready, and output doorbell are of relevance 446 458 * ··· 703 691 */ 704 692 705 693 static int mei_txe_write(struct mei_device *dev, 706 - struct mei_msg_hdr *header, unsigned char *buf) 694 + struct mei_msg_hdr *header, 695 + const unsigned char *buf) 707 696 { 708 697 struct mei_txe_hw *hw = to_txe_hw(dev); 709 698 unsigned long rem; ··· 1180 1167 .intr_clear = mei_txe_intr_clear, 1181 1168 .intr_enable = mei_txe_intr_enable, 1182 1169 .intr_disable = mei_txe_intr_disable, 1170 + .synchronize_irq = mei_txe_synchronize_irq, 1183 1171 1184 1172 .hbuf_free_slots = mei_txe_hbuf_empty_slots, 1185 1173 .hbuf_is_ready = mei_txe_is_input_ready,
+4 -2
drivers/misc/mei/init.c
··· 122 122 mei_dev_state_str(state), fw_sts_str); 123 123 } 124 124 125 + mei_clear_interrupts(dev); 126 + 127 + mei_synchronize_irq(dev); 128 + 125 129 /* we're already in reset, cancel the init timer 126 130 * if the reset was called due the hbm protocol error 127 131 * we need to call it before hw start ··· 276 272 int err; 277 273 278 274 mutex_lock(&dev->device_lock); 279 - 280 - mei_clear_interrupts(dev); 281 275 282 276 dev->dev_state = MEI_DEV_POWER_UP; 283 277 dev->reset_count = 0;
+2 -5
drivers/misc/mei/interrupt.c
··· 118 118 119 119 if (!mei_cl_is_connected(cl)) { 120 120 cl_dbg(dev, cl, "not connected\n"); 121 - list_move_tail(&cb->list, &complete_list->list); 122 121 cb->status = -ENODEV; 123 122 goto discard; 124 123 } ··· 127 128 if (buf_sz < cb->buf_idx) { 128 129 cl_err(dev, cl, "message is too big len %d idx %zu\n", 129 130 mei_hdr->length, cb->buf_idx); 130 - 131 - list_move_tail(&cb->list, &complete_list->list); 132 131 cb->status = -EMSGSIZE; 133 132 goto discard; 134 133 } ··· 134 137 if (cb->buf.size < buf_sz) { 135 138 cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n", 136 139 cb->buf.size, mei_hdr->length, cb->buf_idx); 137 - 138 - list_move_tail(&cb->list, &complete_list->list); 139 140 cb->status = -EMSGSIZE; 140 141 goto discard; 141 142 } ··· 153 158 return 0; 154 159 155 160 discard: 161 + if (cb) 162 + list_move_tail(&cb->list, &complete_list->list); 156 163 mei_irq_discard_msg(dev, mei_hdr); 157 164 return 0; 158 165 }
+43 -2
drivers/misc/mei/main.c
··· 322 322 goto out; 323 323 } 324 324 325 - rets = mei_cl_write(cl, cb, false); 325 + rets = mei_cl_write(cl, cb); 326 326 out: 327 327 mutex_unlock(&dev->device_lock); 328 328 return rets; ··· 653 653 } 654 654 655 655 /** 656 - * fw_status_show - mei device attribute show method 656 + * fw_status_show - mei device fw_status attribute show method 657 657 * 658 658 * @device: device pointer 659 659 * @attr: attribute pointer ··· 684 684 } 685 685 static DEVICE_ATTR_RO(fw_status); 686 686 687 + /** 688 + * hbm_ver_show - display HBM protocol version negotiated with FW 689 + * 690 + * @device: device pointer 691 + * @attr: attribute pointer 692 + * @buf: char out buffer 693 + * 694 + * Return: number of the bytes printed into buf or error 695 + */ 696 + static ssize_t hbm_ver_show(struct device *device, 697 + struct device_attribute *attr, char *buf) 698 + { 699 + struct mei_device *dev = dev_get_drvdata(device); 700 + struct hbm_version ver; 701 + 702 + mutex_lock(&dev->device_lock); 703 + ver = dev->version; 704 + mutex_unlock(&dev->device_lock); 705 + 706 + return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version); 707 + } 708 + static DEVICE_ATTR_RO(hbm_ver); 709 + 710 + /** 711 + * hbm_ver_drv_show - display HBM protocol version advertised by driver 712 + * 713 + * @device: device pointer 714 + * @attr: attribute pointer 715 + * @buf: char out buffer 716 + * 717 + * Return: number of the bytes printed into buf or error 718 + */ 719 + static ssize_t hbm_ver_drv_show(struct device *device, 720 + struct device_attribute *attr, char *buf) 721 + { 722 + return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION); 723 + } 724 + static DEVICE_ATTR_RO(hbm_ver_drv); 725 + 687 726 static struct attribute *mei_attrs[] = { 688 727 &dev_attr_fw_status.attr, 728 + &dev_attr_hbm_ver.attr, 729 + &dev_attr_hbm_ver_drv.attr, 689 730 NULL 690 731 }; 691 732 ATTRIBUTE_GROUPS(mei);
+31 -7
drivers/misc/mei/mei_dev.h
··· 55 55 56 56 /* File state */ 57 57 enum file_state { 58 - MEI_FILE_INITIALIZING = 0, 58 + MEI_FILE_UNINITIALIZED = 0, 59 + MEI_FILE_INITIALIZING, 59 60 MEI_FILE_CONNECTING, 60 61 MEI_FILE_CONNECTED, 61 62 MEI_FILE_DISCONNECTING, ··· 108 107 MEI_FOP_DISCONNECT_RSP, 109 108 MEI_FOP_NOTIFY_START, 110 109 MEI_FOP_NOTIFY_STOP, 110 + }; 111 + 112 + /** 113 + * enum mei_cl_io_mode - io mode between driver and fw 114 + * 115 + * @MEI_CL_IO_TX_BLOCKING: send is blocking 116 + * @MEI_CL_IO_TX_INTERNAL: internal communication between driver and FW 117 + * 118 + * @MEI_CL_IO_RX_NONBLOCK: recv is non-blocking 119 + */ 120 + enum mei_cl_io_mode { 121 + MEI_CL_IO_TX_BLOCKING = BIT(0), 122 + MEI_CL_IO_TX_INTERNAL = BIT(1), 123 + 124 + MEI_CL_IO_RX_NONBLOCK = BIT(2), 111 125 }; 112 126 113 127 /* ··· 185 169 * @fp: pointer to file structure 186 170 * @status: io status of the cb 187 171 * @internal: communication between driver and FW flag 172 + * @blocking: transmission blocking mode 188 173 * @completed: the transfer or reception has completed 189 174 */ 190 175 struct mei_cl_cb { ··· 197 180 const struct file *fp; 198 181 int status; 199 182 u32 internal:1; 183 + u32 blocking:1; 200 184 u32 completed:1; 201 185 }; 202 186 ··· 271 253 * @intr_clear : clear pending interrupts 272 254 * @intr_enable : enable interrupts 273 255 * @intr_disable : disable interrupts 256 + * @synchronize_irq : synchronize irqs 274 257 * 275 258 * @hbuf_free_slots : query for write buffer empty slots 276 259 * @hbuf_is_ready : query if write buffer is empty ··· 293 274 int (*hw_start)(struct mei_device *dev); 294 275 void (*hw_config)(struct mei_device *dev); 295 276 296 - 297 277 int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts); 298 278 enum mei_pg_state (*pg_state)(struct mei_device *dev); 299 279 bool (*pg_in_transition)(struct mei_device *dev); ··· 301 283 void (*intr_clear)(struct mei_device *dev); 302 284 void (*intr_enable)(struct mei_device *dev); 303 285 void 
(*intr_disable)(struct mei_device *dev); 286 + void (*synchronize_irq)(struct mei_device *dev); 304 287 305 288 int (*hbuf_free_slots)(struct mei_device *dev); 306 289 bool (*hbuf_is_ready)(struct mei_device *dev); 307 290 size_t (*hbuf_max_len)(const struct mei_device *dev); 308 - 309 291 int (*write)(struct mei_device *dev, 310 292 struct mei_msg_hdr *hdr, 311 - unsigned char *buf); 293 + const unsigned char *buf); 312 294 313 295 int (*rdbuf_full_slots)(struct mei_device *dev); 314 296 ··· 322 304 void mei_cl_bus_rescan_work(struct work_struct *work); 323 305 void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); 324 306 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, 325 - bool blocking); 326 - ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); 307 + unsigned int mode); 308 + ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, 309 + unsigned int mode); 327 310 bool mei_cl_bus_rx_event(struct mei_cl *cl); 328 311 bool mei_cl_bus_notify_event(struct mei_cl *cl); 329 312 void mei_cl_bus_remove_devices(struct mei_device *bus); ··· 646 627 dev->ops->intr_disable(dev); 647 628 } 648 629 630 + static inline void mei_synchronize_irq(struct mei_device *dev) 631 + { 632 + dev->ops->synchronize_irq(dev); 633 + } 634 + 649 635 static inline bool mei_host_is_ready(struct mei_device *dev) 650 636 { 651 637 return dev->ops->host_is_ready(dev); ··· 676 652 } 677 653 678 654 static inline int mei_write_message(struct mei_device *dev, 679 - struct mei_msg_hdr *hdr, void *buf) 655 + struct mei_msg_hdr *hdr, const void *buf) 680 656 { 681 657 return dev->ops->write(dev, hdr, buf); 682 658 }
+1
drivers/misc/mei/pci-me.c
··· 87 87 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, 88 88 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, 89 89 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, 90 + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)}, 90 91 91 92 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, 92 93 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+22 -24
drivers/nfc/mei_phy.c
··· 297 297 } 298 298 299 299 300 - static void nfc_mei_event_cb(struct mei_cl_device *cldev, u32 events, 301 - void *context) 300 + static void nfc_mei_rx_cb(struct mei_cl_device *cldev) 302 301 { 303 - struct nfc_mei_phy *phy = context; 302 + struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev); 303 + struct sk_buff *skb; 304 + int reply_size; 305 + 306 + if (!phy) 307 + return; 304 308 305 309 if (phy->hard_fault != 0) 306 310 return; 307 311 308 - if (events & BIT(MEI_CL_EVENT_RX)) { 309 - struct sk_buff *skb; 310 - int reply_size; 312 + skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); 313 + if (!skb) 314 + return; 311 315 312 - skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); 313 - if (!skb) 314 - return; 315 - 316 - reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ); 317 - if (reply_size < MEI_NFC_HEADER_SIZE) { 318 - kfree_skb(skb); 319 - return; 320 - } 321 - 322 - skb_put(skb, reply_size); 323 - skb_pull(skb, MEI_NFC_HEADER_SIZE); 324 - 325 - MEI_DUMP_SKB_IN("mei frame read", skb); 326 - 327 - nfc_hci_recv_frame(phy->hdev, skb); 316 + reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ); 317 + if (reply_size < MEI_NFC_HEADER_SIZE) { 318 + kfree_skb(skb); 319 + return; 328 320 } 321 + 322 + skb_put(skb, reply_size); 323 + skb_pull(skb, MEI_NFC_HEADER_SIZE); 324 + 325 + MEI_DUMP_SKB_IN("mei frame read", skb); 326 + 327 + nfc_hci_recv_frame(phy->hdev, skb); 329 328 } 330 329 331 330 static int nfc_mei_phy_enable(void *phy_id) ··· 355 356 goto err; 356 357 } 357 358 358 - r = mei_cldev_register_event_cb(phy->cldev, BIT(MEI_CL_EVENT_RX), 359 - nfc_mei_event_cb, phy); 359 + r = mei_cldev_register_rx_cb(phy->cldev, nfc_mei_rx_cb); 360 360 if (r) { 361 361 pr_err("Event cb registration failed %d\n", r); 362 362 goto err;
+1 -22
drivers/nfc/microread/mei.c
··· 82 82 .remove = microread_mei_remove, 83 83 }; 84 84 85 - static int microread_mei_init(void) 86 - { 87 - int r; 88 - 89 - pr_debug(DRIVER_DESC ": %s\n", __func__); 90 - 91 - r = mei_cldev_driver_register(&microread_driver); 92 - if (r) { 93 - pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); 94 - return r; 95 - } 96 - 97 - return 0; 98 - } 99 - 100 - static void microread_mei_exit(void) 101 - { 102 - mei_cldev_driver_unregister(&microread_driver); 103 - } 104 - 105 - module_init(microread_mei_init); 106 - module_exit(microread_mei_exit); 85 + module_mei_cl_driver(microread_driver); 107 86 108 87 MODULE_LICENSE("GPL"); 109 88 MODULE_DESCRIPTION(DRIVER_DESC);
+1 -22
drivers/nfc/pn544/mei.c
··· 82 82 .remove = pn544_mei_remove, 83 83 }; 84 84 85 - static int pn544_mei_init(void) 86 - { 87 - int r; 88 - 89 - pr_debug(DRIVER_DESC ": %s\n", __func__); 90 - 91 - r = mei_cldev_driver_register(&pn544_driver); 92 - if (r) { 93 - pr_err(PN544_DRIVER_NAME ": driver registration failed\n"); 94 - return r; 95 - } 96 - 97 - return 0; 98 - } 99 - 100 - static void pn544_mei_exit(void) 101 - { 102 - mei_cldev_driver_unregister(&pn544_driver); 103 - } 104 - 105 - module_init(pn544_mei_init); 106 - module_exit(pn544_mei_exit); 85 + module_mei_cl_driver(pn544_driver); 107 86 108 87 MODULE_LICENSE("GPL"); 109 88 MODULE_DESCRIPTION(DRIVER_DESC);
+22
drivers/nvmem/Kconfig
··· 35 35 To compile this driver as a module, choose M here: the module 36 36 will be called nvmem_lpc18xx_eeprom. 37 37 38 + config NVMEM_LPC18XX_OTP 39 + tristate "NXP LPC18XX OTP Memory Support" 40 + depends on ARCH_LPC18XX || COMPILE_TEST 41 + depends on HAS_IOMEM 42 + help 43 + Say Y here to include support for NXP LPC18xx OTP memory found on 44 + all LPC18xx and LPC43xx devices. 45 + To compile this driver as a module, choose M here: the module 46 + will be called nvmem_lpc18xx_otp. 47 + 38 48 config NVMEM_MXS_OCOTP 39 49 tristate "Freescale MXS On-Chip OTP Memory Support" 40 50 depends on ARCH_MXS || COMPILE_TEST ··· 89 79 90 80 This driver can also be built as a module. If so, the module 91 81 will be called nvmem_rockchip_efuse. 82 + 83 + config NVMEM_BCM_OCOTP 84 + tristate "Broadcom On-Chip OTP Controller support" 85 + depends on ARCH_BCM_IPROC || COMPILE_TEST 86 + depends on HAS_IOMEM 87 + default ARCH_BCM_IPROC 88 + help 89 + Say y here to enable read/write access to the Broadcom OTP 90 + controller. 91 + 92 + This driver can also be built as a module. If so, the module 93 + will be called nvmem-bcm-ocotp. 92 94 93 95 config NVMEM_SUNXI_SID 94 96 tristate "Allwinner SoCs SID support"
+4
drivers/nvmem/Makefile
··· 6 6 nvmem_core-y := core.o 7 7 8 8 # Devices 9 + obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o 10 + nvmem-bcm-ocotp-y := bcm-ocotp.o 9 11 obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o 10 12 nvmem-imx-ocotp-y := imx-ocotp.o 11 13 obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o 12 14 nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o 15 + obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o 16 + nvmem_lpc18xx_otp-y := lpc18xx_otp.o 13 17 obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o 14 18 nvmem-mxs-ocotp-y := mxs-ocotp.o 15 19 obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
+335
drivers/nvmem/bcm-ocotp.c
··· 1 + /* 2 + * Copyright (C) 2016 Broadcom 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License as 6 + * published by the Free Software Foundation version 2. 7 + * 8 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 9 + * kind, whether express or implied; without even the implied warranty 10 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #include <linux/delay.h> 15 + #include <linux/device.h> 16 + #include <linux/io.h> 17 + #include <linux/module.h> 18 + #include <linux/nvmem-provider.h> 19 + #include <linux/of.h> 20 + #include <linux/of_address.h> 21 + #include <linux/platform_device.h> 22 + 23 + /* 24 + * # of tries for OTP Status. The time to execute a command varies. The slowest 25 + * commands are writes which also vary based on the # of bits turned on. Writing 26 + * 0xffffffff takes ~3800 us. 
27 + */ 28 + #define OTPC_RETRIES 5000 29 + 30 + /* Sequence to enable OTP program */ 31 + #define OTPC_PROG_EN_SEQ { 0xf, 0x4, 0x8, 0xd } 32 + 33 + /* OTPC Commands */ 34 + #define OTPC_CMD_READ 0x0 35 + #define OTPC_CMD_OTP_PROG_ENABLE 0x2 36 + #define OTPC_CMD_OTP_PROG_DISABLE 0x3 37 + #define OTPC_CMD_PROGRAM 0xA 38 + 39 + /* OTPC Status Bits */ 40 + #define OTPC_STAT_CMD_DONE BIT(1) 41 + #define OTPC_STAT_PROG_OK BIT(2) 42 + 43 + /* OTPC register definition */ 44 + #define OTPC_MODE_REG_OFFSET 0x0 45 + #define OTPC_MODE_REG_OTPC_MODE 0 46 + #define OTPC_COMMAND_OFFSET 0x4 47 + #define OTPC_COMMAND_COMMAND_WIDTH 6 48 + #define OTPC_CMD_START_OFFSET 0x8 49 + #define OTPC_CMD_START_START 0 50 + #define OTPC_CPU_STATUS_OFFSET 0xc 51 + #define OTPC_CPUADDR_REG_OFFSET 0x28 52 + #define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16 53 + #define OTPC_CPU_WRITE_REG_OFFSET 0x2c 54 + 55 + #define OTPC_CMD_MASK (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1) 56 + #define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1) 57 + 58 + 59 + struct otpc_map { 60 + /* in words. */ 61 + u32 otpc_row_size; 62 + /* 128 bit row / 4 words support. */ 63 + u16 data_r_offset[4]; 64 + /* 128 bit row / 4 words support. 
*/ 65 + u16 data_w_offset[4]; 66 + }; 67 + 68 + static struct otpc_map otp_map = { 69 + .otpc_row_size = 1, 70 + .data_r_offset = {0x10}, 71 + .data_w_offset = {0x2c}, 72 + }; 73 + 74 + static struct otpc_map otp_map_v2 = { 75 + .otpc_row_size = 2, 76 + .data_r_offset = {0x10, 0x5c}, 77 + .data_w_offset = {0x2c, 0x64}, 78 + }; 79 + 80 + struct otpc_priv { 81 + struct device *dev; 82 + void __iomem *base; 83 + struct otpc_map *map; 84 + struct nvmem_config *config; 85 + }; 86 + 87 + static inline void set_command(void __iomem *base, u32 command) 88 + { 89 + writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET); 90 + } 91 + 92 + static inline void set_cpu_address(void __iomem *base, u32 addr) 93 + { 94 + writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET); 95 + } 96 + 97 + static inline void set_start_bit(void __iomem *base) 98 + { 99 + writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET); 100 + } 101 + 102 + static inline void reset_start_bit(void __iomem *base) 103 + { 104 + writel(0, base + OTPC_CMD_START_OFFSET); 105 + } 106 + 107 + static inline void write_cpu_data(void __iomem *base, u32 value) 108 + { 109 + writel(value, base + OTPC_CPU_WRITE_REG_OFFSET); 110 + } 111 + 112 + static int poll_cpu_status(void __iomem *base, u32 value) 113 + { 114 + u32 status; 115 + u32 retries; 116 + 117 + for (retries = 0; retries < OTPC_RETRIES; retries++) { 118 + status = readl(base + OTPC_CPU_STATUS_OFFSET); 119 + if (status & value) 120 + break; 121 + udelay(1); 122 + } 123 + if (retries == OTPC_RETRIES) 124 + return -EAGAIN; 125 + 126 + return 0; 127 + } 128 + 129 + static int enable_ocotp_program(void __iomem *base) 130 + { 131 + static const u32 vals[] = OTPC_PROG_EN_SEQ; 132 + int i; 133 + int ret; 134 + 135 + /* Write the magic sequence to enable programming */ 136 + set_command(base, OTPC_CMD_OTP_PROG_ENABLE); 137 + for (i = 0; i < ARRAY_SIZE(vals); i++) { 138 + write_cpu_data(base, vals[i]); 139 + set_start_bit(base); 140 + ret = 
poll_cpu_status(base, OTPC_STAT_CMD_DONE); 141 + reset_start_bit(base); 142 + if (ret) 143 + return ret; 144 + } 145 + 146 + return poll_cpu_status(base, OTPC_STAT_PROG_OK); 147 + } 148 + 149 + static int disable_ocotp_program(void __iomem *base) 150 + { 151 + int ret; 152 + 153 + set_command(base, OTPC_CMD_OTP_PROG_DISABLE); 154 + set_start_bit(base); 155 + ret = poll_cpu_status(base, OTPC_STAT_PROG_OK); 156 + reset_start_bit(base); 157 + 158 + return ret; 159 + } 160 + 161 + static int bcm_otpc_read(void *context, unsigned int offset, void *val, 162 + size_t bytes) 163 + { 164 + struct otpc_priv *priv = context; 165 + u32 *buf = val; 166 + u32 bytes_read; 167 + u32 address = offset / priv->config->word_size; 168 + int i, ret; 169 + 170 + for (bytes_read = 0; bytes_read < bytes;) { 171 + set_command(priv->base, OTPC_CMD_READ); 172 + set_cpu_address(priv->base, address++); 173 + set_start_bit(priv->base); 174 + ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); 175 + if (ret) { 176 + dev_err(priv->dev, "otp read error: 0x%x", ret); 177 + return -EIO; 178 + } 179 + 180 + for (i = 0; i < priv->map->otpc_row_size; i++) { 181 + *buf++ = readl(priv->base + 182 + priv->map->data_r_offset[i]); 183 + bytes_read += sizeof(*buf); 184 + } 185 + 186 + reset_start_bit(priv->base); 187 + } 188 + 189 + return 0; 190 + } 191 + 192 + static int bcm_otpc_write(void *context, unsigned int offset, void *val, 193 + size_t bytes) 194 + { 195 + struct otpc_priv *priv = context; 196 + u32 *buf = val; 197 + u32 bytes_written; 198 + u32 address = offset / priv->config->word_size; 199 + int i, ret; 200 + 201 + if (offset % priv->config->word_size) 202 + return -EINVAL; 203 + 204 + ret = enable_ocotp_program(priv->base); 205 + if (ret) 206 + return -EIO; 207 + 208 + for (bytes_written = 0; bytes_written < bytes;) { 209 + set_command(priv->base, OTPC_CMD_PROGRAM); 210 + set_cpu_address(priv->base, address++); 211 + for (i = 0; i < priv->map->otpc_row_size; i++) { 212 + writel(*buf, 
priv->base + priv->map->data_r_offset[i]); 213 + buf++; 214 + bytes_written += sizeof(*buf); 215 + } 216 + set_start_bit(priv->base); 217 + ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE); 218 + reset_start_bit(priv->base); 219 + if (ret) { 220 + dev_err(priv->dev, "otp write error: 0x%x", ret); 221 + return -EIO; 222 + } 223 + } 224 + 225 + disable_ocotp_program(priv->base); 226 + 227 + return 0; 228 + } 229 + 230 + static struct nvmem_config bcm_otpc_nvmem_config = { 231 + .name = "bcm-ocotp", 232 + .read_only = false, 233 + .word_size = 4, 234 + .stride = 4, 235 + .owner = THIS_MODULE, 236 + .reg_read = bcm_otpc_read, 237 + .reg_write = bcm_otpc_write, 238 + }; 239 + 240 + static const struct of_device_id bcm_otpc_dt_ids[] = { 241 + { .compatible = "brcm,ocotp" }, 242 + { .compatible = "brcm,ocotp-v2" }, 243 + { }, 244 + }; 245 + MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); 246 + 247 + static int bcm_otpc_probe(struct platform_device *pdev) 248 + { 249 + struct device *dev = &pdev->dev; 250 + struct device_node *dn = dev->of_node; 251 + struct resource *res; 252 + struct otpc_priv *priv; 253 + struct nvmem_device *nvmem; 254 + int err; 255 + u32 num_words; 256 + 257 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 258 + if (!priv) 259 + return -ENOMEM; 260 + 261 + if (of_device_is_compatible(dev->of_node, "brcm,ocotp")) 262 + priv->map = &otp_map; 263 + else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) 264 + priv->map = &otp_map_v2; 265 + else { 266 + dev_err(&pdev->dev, 267 + "%s otpc config map not defined\n", __func__); 268 + return -EINVAL; 269 + } 270 + 271 + /* Get OTP base address register. */ 272 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 273 + priv->base = devm_ioremap_resource(dev, res); 274 + if (IS_ERR(priv->base)) { 275 + dev_err(dev, "unable to map I/O memory\n"); 276 + return PTR_ERR(priv->base); 277 + } 278 + 279 + /* Enable CPU access to OTPC. 
*/ 280 + writel(readl(priv->base + OTPC_MODE_REG_OFFSET) | 281 + BIT(OTPC_MODE_REG_OTPC_MODE), 282 + priv->base + OTPC_MODE_REG_OFFSET); 283 + reset_start_bit(priv->base); 284 + 285 + /* Read size of memory in words. */ 286 + err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words); 287 + if (err) { 288 + dev_err(dev, "size parameter not specified\n"); 289 + return -EINVAL; 290 + } else if (num_words == 0) { 291 + dev_err(dev, "size must be > 0\n"); 292 + return -EINVAL; 293 + } 294 + 295 + bcm_otpc_nvmem_config.size = 4 * num_words; 296 + bcm_otpc_nvmem_config.dev = dev; 297 + bcm_otpc_nvmem_config.priv = priv; 298 + 299 + if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) { 300 + bcm_otpc_nvmem_config.word_size = 8; 301 + bcm_otpc_nvmem_config.stride = 8; 302 + } 303 + 304 + priv->config = &bcm_otpc_nvmem_config; 305 + 306 + nvmem = nvmem_register(&bcm_otpc_nvmem_config); 307 + if (IS_ERR(nvmem)) { 308 + dev_err(dev, "error registering nvmem config\n"); 309 + return PTR_ERR(nvmem); 310 + } 311 + 312 + platform_set_drvdata(pdev, nvmem); 313 + 314 + return 0; 315 + } 316 + 317 + static int bcm_otpc_remove(struct platform_device *pdev) 318 + { 319 + struct nvmem_device *nvmem = platform_get_drvdata(pdev); 320 + 321 + return nvmem_unregister(nvmem); 322 + } 323 + 324 + static struct platform_driver bcm_otpc_driver = { 325 + .probe = bcm_otpc_probe, 326 + .remove = bcm_otpc_remove, 327 + .driver = { 328 + .name = "brcm-otpc", 329 + .of_match_table = bcm_otpc_dt_ids, 330 + }, 331 + }; 332 + module_platform_driver(bcm_otpc_driver); 333 + 334 + MODULE_DESCRIPTION("Broadcom OTPC driver"); 335 + MODULE_LICENSE("GPL v2");
+124
drivers/nvmem/lpc18xx_otp.c
··· 1 + /* 2 + * NXP LPC18xx/43xx OTP memory NVMEM driver 3 + * 4 + * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com> 5 + * 6 + * Based on the imx ocotp driver, 7 + * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 11 + * as published by the Free Software Foundation. 12 + * 13 + * TODO: add support for writing OTP register via API in boot ROM. 14 + */ 15 + 16 + #include <linux/io.h> 17 + #include <linux/module.h> 18 + #include <linux/nvmem-provider.h> 19 + #include <linux/of.h> 20 + #include <linux/of_device.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/slab.h> 23 + 24 + /* 25 + * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts 26 + * at offset 0 from the base. 27 + * 28 + * Bank 0 contains the part ID for Flashless devices and is reserved for 29 + * devices with Flash. 30 + * Bank 1/2 is general purpose or AES key storage for secure devices. 31 + * Bank 3 contains control data, USB ID and general purpose words.
32 + */ 33 + #define LPC18XX_OTP_NUM_BANKS 4 34 + #define LPC18XX_OTP_WORDS_PER_BANK 4 35 + #define LPC18XX_OTP_WORD_SIZE sizeof(u32) 36 + #define LPC18XX_OTP_SIZE (LPC18XX_OTP_NUM_BANKS * \ 37 + LPC18XX_OTP_WORDS_PER_BANK * \ 38 + LPC18XX_OTP_WORD_SIZE) 39 + 40 + struct lpc18xx_otp { 41 + void __iomem *base; 42 + }; 43 + 44 + static int lpc18xx_otp_read(void *context, unsigned int offset, 45 + void *val, size_t bytes) 46 + { 47 + struct lpc18xx_otp *otp = context; 48 + unsigned int count = bytes >> 2; 49 + u32 index = offset >> 2; 50 + u32 *buf = val; 51 + int i; 52 + 53 + if (count > (LPC18XX_OTP_SIZE - index)) 54 + count = LPC18XX_OTP_SIZE - index; 55 + 56 + for (i = index; i < (index + count); i++) 57 + *buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE); 58 + 59 + return 0; 60 + } 61 + 62 + static struct nvmem_config lpc18xx_otp_nvmem_config = { 63 + .name = "lpc18xx-otp", 64 + .read_only = true, 65 + .word_size = LPC18XX_OTP_WORD_SIZE, 66 + .stride = LPC18XX_OTP_WORD_SIZE, 67 + .owner = THIS_MODULE, 68 + .reg_read = lpc18xx_otp_read, 69 + }; 70 + 71 + static int lpc18xx_otp_probe(struct platform_device *pdev) 72 + { 73 + struct nvmem_device *nvmem; 74 + struct lpc18xx_otp *otp; 75 + struct resource *res; 76 + 77 + otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL); 78 + if (!otp) 79 + return -ENOMEM; 80 + 81 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 82 + otp->base = devm_ioremap_resource(&pdev->dev, res); 83 + if (IS_ERR(otp->base)) 84 + return PTR_ERR(otp->base); 85 + 86 + lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE; 87 + lpc18xx_otp_nvmem_config.dev = &pdev->dev; 88 + lpc18xx_otp_nvmem_config.priv = otp; 89 + 90 + nvmem = nvmem_register(&lpc18xx_otp_nvmem_config); 91 + if (IS_ERR(nvmem)) 92 + return PTR_ERR(nvmem); 93 + 94 + platform_set_drvdata(pdev, nvmem); 95 + 96 + return 0; 97 + } 98 + 99 + static int lpc18xx_otp_remove(struct platform_device *pdev) 100 + { 101 + struct nvmem_device *nvmem = platform_get_drvdata(pdev); 102 + 
103 + return nvmem_unregister(nvmem); 104 + } 105 + 106 + static const struct of_device_id lpc18xx_otp_dt_ids[] = { 107 + { .compatible = "nxp,lpc1850-otp" }, 108 + { }, 109 + }; 110 + MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids); 111 + 112 + static struct platform_driver lpc18xx_otp_driver = { 113 + .probe = lpc18xx_otp_probe, 114 + .remove = lpc18xx_otp_remove, 115 + .driver = { 116 + .name = "lpc18xx_otp", 117 + .of_match_table = lpc18xx_otp_dt_ids, 118 + }, 119 + }; 120 + module_platform_driver(lpc18xx_otp_driver); 121 + 122 + MODULE_AUTHOR("Joachim Eastwoood <manabian@gmail.com>"); 123 + MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver"); 124 + MODULE_LICENSE("GPL v2");
+46 -1
drivers/of/overlay.c
··· 58 58 static int of_overlay_apply_one(struct of_overlay *ov, 59 59 struct device_node *target, const struct device_node *overlay); 60 60 61 + static BLOCKING_NOTIFIER_HEAD(of_overlay_chain); 62 + 63 + int of_overlay_notifier_register(struct notifier_block *nb) 64 + { 65 + return blocking_notifier_chain_register(&of_overlay_chain, nb); 66 + } 67 + EXPORT_SYMBOL_GPL(of_overlay_notifier_register); 68 + 69 + int of_overlay_notifier_unregister(struct notifier_block *nb) 70 + { 71 + return blocking_notifier_chain_unregister(&of_overlay_chain, nb); 72 + } 73 + EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister); 74 + 75 + static int of_overlay_notify(struct of_overlay *ov, 76 + enum of_overlay_notify_action action) 77 + { 78 + struct of_overlay_notify_data nd; 79 + int i, ret; 80 + 81 + for (i = 0; i < ov->count; i++) { 82 + struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i]; 83 + 84 + nd.target = ovinfo->target; 85 + nd.overlay = ovinfo->overlay; 86 + 87 + ret = blocking_notifier_call_chain(&of_overlay_chain, 88 + action, &nd); 89 + if (ret) 90 + return notifier_to_errno(ret); 91 + } 92 + 93 + return 0; 94 + } 95 + 61 96 static int of_overlay_apply_single_property(struct of_overlay *ov, 62 97 struct device_node *target, struct property *prop) 63 98 { ··· 403 368 goto err_free_idr; 404 369 } 405 370 371 + err = of_overlay_notify(ov, OF_OVERLAY_PRE_APPLY); 372 + if (err < 0) { 373 + pr_err("%s: Pre-apply notifier failed (err=%d)\n", 374 + __func__, err); 375 + goto err_free_idr; 376 + } 377 + 406 378 /* apply the overlay */ 407 379 err = of_overlay_apply(ov); 408 380 if (err) ··· 423 381 424 382 /* add to the tail of the overlay list */ 425 383 list_add_tail(&ov->node, &ov_list); 384 + 385 + of_overlay_notify(ov, OF_OVERLAY_POST_APPLY); 426 386 427 387 mutex_unlock(&of_mutex); 428 388 ··· 542 498 goto out; 543 499 } 544 500 545 - 501 + of_overlay_notify(ov, OF_OVERLAY_PRE_REMOVE); 546 502 list_del(&ov->node); 547 503 __of_changeset_revert(&ov->cset); 504 + 
of_overlay_notify(ov, OF_OVERLAY_POST_REMOVE); 548 505 of_free_overlay_info(ov); 549 506 idr_remove(&ov_idr, id); 550 507 of_changeset_destroy(&ov->cset);
+2 -4
drivers/platform/goldfish/goldfish_pipe.c
··· 308 308 * returns a small amount, then there's no need to pin that 309 309 * much memory to the process. 310 310 */ 311 - down_read(&current->mm->mmap_sem); 312 - ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE, 313 - &page, NULL); 314 - up_read(&current->mm->mmap_sem); 311 + ret = get_user_pages_unlocked(address, 1, &page, 312 + is_write ? 0 : FOLL_WRITE); 315 313 if (ret < 0) 316 314 break; 317 315
+2 -2
drivers/s390/char/sclp_ctl.c
··· 10 10 #include <linux/uaccess.h> 11 11 #include <linux/miscdevice.h> 12 12 #include <linux/gfp.h> 13 - #include <linux/module.h> 13 + #include <linux/init.h> 14 14 #include <linux/ioctl.h> 15 15 #include <linux/fs.h> 16 16 #include <asm/compat.h> ··· 126 126 .name = "sclp", 127 127 .fops = &sclp_ctl_fops, 128 128 }; 129 - module_misc_device(sclp_ctl_device); 129 + builtin_misc_device(sclp_ctl_device);
+3 -3
drivers/thunderbolt/nhi_regs.h
··· 1 1 /* 2 - * Thunderbolt Cactus Ridge driver - NHI registers 2 + * Thunderbolt driver - NHI registers 3 3 * 4 4 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 5 */ 6 6 7 - #ifndef DSL3510_REGS_H_ 8 - #define DSL3510_REGS_H_ 7 + #ifndef NHI_REGS_H_ 8 + #define NHI_REGS_H_ 9 9 10 10 #include <linux/types.h> 11 11
+9
drivers/uio/Kconfig
··· 155 155 156 156 If you compile this as a module, it will be called uio_mf624. 157 157 158 + config UIO_HV_GENERIC 159 + tristate "Generic driver for Hyper-V VMBus" 160 + depends on HYPERV 161 + help 162 + Generic driver that you can bind, dynamically, to any 163 + Hyper-V VMBus device. It is useful to provide direct access 164 + to network and storage devices from userspace. 165 + 166 + If you compile this as a module, it will be called uio_hv_generic. 158 167 endif
+1
drivers/uio/Makefile
··· 9 9 obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o 10 10 obj-$(CONFIG_UIO_MF624) += uio_mf624.o 11 11 obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o 12 + obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
+218
drivers/uio/uio_hv_generic.c
··· 1 + /* 2 + * uio_hv_generic - generic UIO driver for VMBus 3 + * 4 + * Copyright (c) 2013-2016 Brocade Communications Systems, Inc. 5 + * Copyright (c) 2016, Microsoft Corporation. 6 + * 7 + * 8 + * This work is licensed under the terms of the GNU GPL, version 2. 9 + * 10 + * Since the driver does not declare any device ids, you must allocate 11 + * id and bind the device to the driver yourself. For example: 12 + * 13 + * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \ 14 + * > /sys/bus/vmbus/drivers/uio_hv_generic 15 + * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \ 16 + * > /sys/bus/vmbus/drivers/hv_netvsc/unbind 17 + * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \ 18 + * > /sys/bus/vmbus/drivers/uio_hv_generic/bind 19 + */ 20 + 21 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22 + 23 + #include <linux/device.h> 24 + #include <linux/kernel.h> 25 + #include <linux/module.h> 26 + #include <linux/uio_driver.h> 27 + #include <linux/netdevice.h> 28 + #include <linux/if_ether.h> 29 + #include <linux/skbuff.h> 30 + #include <linux/hyperv.h> 31 + #include <linux/vmalloc.h> 32 + #include <linux/slab.h> 33 + 34 + #include "../hv/hyperv_vmbus.h" 35 + 36 + #define DRIVER_VERSION "0.02.0" 37 + #define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>" 38 + #define DRIVER_DESC "Generic UIO driver for VMBus devices" 39 + 40 + /* 41 + * List of resources to be mapped to user space 42 + * can be extended up to MAX_UIO_MAPS(5) items 43 + */ 44 + enum hv_uio_map { 45 + TXRX_RING_MAP = 0, 46 + INT_PAGE_MAP, 47 + MON_PAGE_MAP, 48 + }; 49 + 50 + #define HV_RING_SIZE 512 51 + 52 + struct hv_uio_private_data { 53 + struct uio_info info; 54 + struct hv_device *device; 55 + }; 56 + 57 + static int 58 + hv_uio_mmap(struct uio_info *info, struct vm_area_struct *vma) 59 + { 60 + int mi; 61 + 62 + if (vma->vm_pgoff >= MAX_UIO_MAPS) 63 + return -EINVAL; 64 + 65 + if (info->mem[vma->vm_pgoff].size == 0) 66 + return -EINVAL; 67 + 68 + mi = (int)vma->vm_pgoff; 69 
+ 70 + return remap_pfn_range(vma, vma->vm_start, 71 + info->mem[mi].addr >> PAGE_SHIFT, 72 + vma->vm_end - vma->vm_start, vma->vm_page_prot); 73 + } 74 + 75 + /* 76 + * This is the irqcontrol callback to be registered to uio_info. 77 + * It can be used to disable/enable interrupt from user space processes. 78 + * 79 + * @param info 80 + * pointer to uio_info. 81 + * @param irq_state 82 + * state value. 1 to enable interrupt, 0 to disable interrupt. 83 + */ 84 + static int 85 + hv_uio_irqcontrol(struct uio_info *info, s32 irq_state) 86 + { 87 + struct hv_uio_private_data *pdata = info->priv; 88 + struct hv_device *dev = pdata->device; 89 + 90 + dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state; 91 + virt_mb(); 92 + 93 + return 0; 94 + } 95 + 96 + /* 97 + * Callback from vmbus_event when something is in inbound ring. 98 + */ 99 + static void hv_uio_channel_cb(void *context) 100 + { 101 + struct hv_uio_private_data *pdata = context; 102 + struct hv_device *dev = pdata->device; 103 + 104 + dev->channel->inbound.ring_buffer->interrupt_mask = 1; 105 + virt_mb(); 106 + 107 + uio_event_notify(&pdata->info); 108 + } 109 + 110 + static int 111 + hv_uio_probe(struct hv_device *dev, 112 + const struct hv_vmbus_device_id *dev_id) 113 + { 114 + struct hv_uio_private_data *pdata; 115 + int ret; 116 + 117 + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); 118 + if (!pdata) 119 + return -ENOMEM; 120 + 121 + ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, 122 + HV_RING_SIZE * PAGE_SIZE, NULL, 0, 123 + hv_uio_channel_cb, pdata); 124 + if (ret) 125 + goto fail; 126 + 127 + dev->channel->inbound.ring_buffer->interrupt_mask = 1; 128 + dev->channel->batched_reading = false; 129 + 130 + /* Fill general uio info */ 131 + pdata->info.name = "uio_hv_generic"; 132 + pdata->info.version = DRIVER_VERSION; 133 + pdata->info.irqcontrol = hv_uio_irqcontrol; 134 + pdata->info.mmap = hv_uio_mmap; 135 + pdata->info.irq = UIO_IRQ_CUSTOM; 136 + 137 + /* mem resources */ 138 + 
pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings"; 139 + pdata->info.mem[TXRX_RING_MAP].addr 140 + = virt_to_phys(dev->channel->ringbuffer_pages); 141 + pdata->info.mem[TXRX_RING_MAP].size 142 + = dev->channel->ringbuffer_pagecount * PAGE_SIZE; 143 + pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL; 144 + 145 + pdata->info.mem[INT_PAGE_MAP].name = "int_page"; 146 + pdata->info.mem[INT_PAGE_MAP].addr = 147 + virt_to_phys(vmbus_connection.int_page); 148 + pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE; 149 + pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL; 150 + 151 + pdata->info.mem[MON_PAGE_MAP].name = "monitor_pages"; 152 + pdata->info.mem[MON_PAGE_MAP].addr = 153 + virt_to_phys(vmbus_connection.monitor_pages[1]); 154 + pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE; 155 + pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL; 156 + 157 + pdata->info.priv = pdata; 158 + pdata->device = dev; 159 + 160 + ret = uio_register_device(&dev->device, &pdata->info); 161 + if (ret) { 162 + dev_err(&dev->device, "hv_uio register failed\n"); 163 + goto fail_close; 164 + } 165 + 166 + hv_set_drvdata(dev, pdata); 167 + 168 + return 0; 169 + 170 + fail_close: 171 + vmbus_close(dev->channel); 172 + fail: 173 + kfree(pdata); 174 + 175 + return ret; 176 + } 177 + 178 + static int 179 + hv_uio_remove(struct hv_device *dev) 180 + { 181 + struct hv_uio_private_data *pdata = hv_get_drvdata(dev); 182 + 183 + if (!pdata) 184 + return 0; 185 + 186 + uio_unregister_device(&pdata->info); 187 + hv_set_drvdata(dev, NULL); 188 + vmbus_close(dev->channel); 189 + kfree(pdata); 190 + return 0; 191 + } 192 + 193 + static struct hv_driver hv_uio_drv = { 194 + .name = "uio_hv_generic", 195 + .id_table = NULL, /* only dynamic id's */ 196 + .probe = hv_uio_probe, 197 + .remove = hv_uio_remove, 198 + }; 199 + 200 + static int __init 201 + hyperv_module_init(void) 202 + { 203 + return vmbus_driver_register(&hv_uio_drv); 204 + } 205 + 206 + static void __exit 207 + 
hyperv_module_exit(void) 208 + { 209 + vmbus_driver_unregister(&hv_uio_drv); 210 + } 211 + 212 + module_init(hyperv_module_init); 213 + module_exit(hyperv_module_exit); 214 + 215 + MODULE_VERSION(DRIVER_VERSION); 216 + MODULE_LICENSE("GPL v2"); 217 + MODULE_AUTHOR(DRIVER_AUTHOR); 218 + MODULE_DESCRIPTION(DRIVER_DESC);
+9 -1
drivers/uio/uio_pruss.c
··· 111 111 gdev->sram_vaddr, 112 112 sram_pool_sz); 113 113 kfree(gdev->info); 114 + clk_disable(gdev->pruss_clk); 114 115 clk_put(gdev->pruss_clk); 115 116 kfree(gdev); 116 117 } ··· 144 143 kfree(gdev); 145 144 return ret; 146 145 } else { 147 - clk_enable(gdev->pruss_clk); 146 + ret = clk_enable(gdev->pruss_clk); 147 + if (ret) { 148 + dev_err(dev, "Failed to enable clock\n"); 149 + clk_put(gdev->pruss_clk); 150 + kfree(gdev->info); 151 + kfree(gdev); 152 + return ret; 153 + } 148 154 } 149 155 150 156 regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+12 -46
drivers/watchdog/mei_wdt.c
··· 410 410 } 411 411 412 412 /** 413 - * mei_wdt_event_rx - callback for data receive 413 + * mei_wdt_rx - callback for data receive 414 414 * 415 415 * @cldev: bus device 416 416 */ 417 - static void mei_wdt_event_rx(struct mei_cl_device *cldev) 417 + static void mei_wdt_rx(struct mei_cl_device *cldev) 418 418 { 419 419 struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); 420 420 struct mei_wdt_start_response res; ··· 482 482 } 483 483 484 484 /* 485 - * mei_wdt_notify_event - callback for event notification 485 + * mei_wdt_notif - callback for event notification 486 486 * 487 487 * @cldev: bus device 488 488 */ 489 - static void mei_wdt_notify_event(struct mei_cl_device *cldev) 489 + static void mei_wdt_notif(struct mei_cl_device *cldev) 490 490 { 491 491 struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); 492 492 ··· 494 494 return; 495 495 496 496 mei_wdt_register(wdt); 497 - } 498 - 499 - /** 500 - * mei_wdt_event - callback for event receive 501 - * 502 - * @cldev: bus device 503 - * @events: event mask 504 - * @context: callback context 505 - */ 506 - static void mei_wdt_event(struct mei_cl_device *cldev, 507 - u32 events, void *context) 508 - { 509 - if (events & BIT(MEI_CL_EVENT_RX)) 510 - mei_wdt_event_rx(cldev); 511 - 512 - if (events & BIT(MEI_CL_EVENT_NOTIF)) 513 - mei_wdt_notify_event(cldev); 514 497 } 515 498 516 499 #if IS_ENABLED(CONFIG_DEBUG_FS) ··· 606 623 goto err_out; 607 624 } 608 625 609 - ret = mei_cldev_register_event_cb(wdt->cldev, 610 - BIT(MEI_CL_EVENT_RX) | 611 - BIT(MEI_CL_EVENT_NOTIF), 612 - mei_wdt_event, NULL); 626 + ret = mei_cldev_register_rx_cb(wdt->cldev, mei_wdt_rx); 627 + if (ret) { 628 + dev_err(&cldev->dev, "Could not reg rx event ret=%d\n", ret); 629 + goto err_disable; 630 + } 613 631 632 + ret = mei_cldev_register_notif_cb(wdt->cldev, mei_wdt_notif); 614 633 /* on legacy devices notification is not supported 615 - * this doesn't fail the registration for RX event 616 634 */ 617 635 if (ret && ret != -EOPNOTSUPP) { 618 - 
dev_err(&cldev->dev, "Could not register event ret=%d\n", ret); 636 + dev_err(&cldev->dev, "Could not reg notif event ret=%d\n", ret); 619 637 goto err_disable; 620 638 } 621 639 ··· 683 699 .remove = mei_wdt_remove, 684 700 }; 685 701 686 - static int __init mei_wdt_init(void) 687 - { 688 - int ret; 689 - 690 - ret = mei_cldev_driver_register(&mei_wdt_driver); 691 - if (ret) { 692 - pr_err(KBUILD_MODNAME ": module registration failed\n"); 693 - return ret; 694 - } 695 - return 0; 696 - } 697 - 698 - static void __exit mei_wdt_exit(void) 699 - { 700 - mei_cldev_driver_unregister(&mei_wdt_driver); 701 - } 702 - 703 - module_init(mei_wdt_init); 704 - module_exit(mei_wdt_exit); 702 + module_mei_cl_driver(mei_wdt_driver); 705 703 706 704 MODULE_AUTHOR("Intel Corporation"); 707 705 MODULE_LICENSE("GPL");
+60
include/linux/fpga/fpga-bridge.h
··· 1 + #include <linux/device.h> 2 + #include <linux/fpga/fpga-mgr.h> 3 + 4 + #ifndef _LINUX_FPGA_BRIDGE_H 5 + #define _LINUX_FPGA_BRIDGE_H 6 + 7 + struct fpga_bridge; 8 + 9 + /** 10 + * struct fpga_bridge_ops - ops for low level FPGA bridge drivers 11 + * @enable_show: returns the FPGA bridge's status 12 + * @enable_set: set a FPGA bridge as enabled or disabled 13 + * @fpga_bridge_remove: set FPGA into a specific state during driver remove 14 + */ 15 + struct fpga_bridge_ops { 16 + int (*enable_show)(struct fpga_bridge *bridge); 17 + int (*enable_set)(struct fpga_bridge *bridge, bool enable); 18 + void (*fpga_bridge_remove)(struct fpga_bridge *bridge); 19 + }; 20 + 21 + /** 22 + * struct fpga_bridge - FPGA bridge structure 23 + * @name: name of low level FPGA bridge 24 + * @dev: FPGA bridge device 25 + * @mutex: enforces exclusive reference to bridge 26 + * @br_ops: pointer to struct of FPGA bridge ops 27 + * @info: fpga image specific information 28 + * @node: FPGA bridge list node 29 + * @priv: low level driver private data 30 + */ 31 + struct fpga_bridge { 32 + const char *name; 33 + struct device dev; 34 + struct mutex mutex; /* for exclusive reference to bridge */ 35 + const struct fpga_bridge_ops *br_ops; 36 + struct fpga_image_info *info; 37 + struct list_head node; 38 + void *priv; 39 + }; 40 + 41 + #define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev) 42 + 43 + struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, 44 + struct fpga_image_info *info); 45 + void fpga_bridge_put(struct fpga_bridge *bridge); 46 + int fpga_bridge_enable(struct fpga_bridge *bridge); 47 + int fpga_bridge_disable(struct fpga_bridge *bridge); 48 + 49 + int fpga_bridges_enable(struct list_head *bridge_list); 50 + int fpga_bridges_disable(struct list_head *bridge_list); 51 + void fpga_bridges_put(struct list_head *bridge_list); 52 + int fpga_bridge_get_to_list(struct device_node *np, 53 + struct fpga_image_info *info, 54 + struct list_head *bridge_list); 55 + 
56 + int fpga_bridge_register(struct device *dev, const char *name, 57 + const struct fpga_bridge_ops *br_ops, void *priv); 58 + void fpga_bridge_unregister(struct device *dev); 59 + 60 + #endif /* _LINUX_FPGA_BRIDGE_H */
+25 -4
include/linux/fpga/fpga-mgr.h
··· 65 65 /* 66 66 * FPGA Manager flags 67 67 * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported 68 + * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting 68 69 */ 69 70 #define FPGA_MGR_PARTIAL_RECONFIG BIT(0) 71 + #define FPGA_MGR_EXTERNAL_CONFIG BIT(1) 72 + 73 + /** 74 + * struct fpga_image_info - information specific to a FPGA image 75 + * @flags: boolean flags as defined above 76 + * @enable_timeout_us: maximum time to enable traffic through bridge (uSec) 77 + * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) 78 + */ 79 + struct fpga_image_info { 80 + u32 flags; 81 + u32 enable_timeout_us; 82 + u32 disable_timeout_us; 83 + }; 70 84 71 85 /** 72 86 * struct fpga_manager_ops - ops for low level fpga manager drivers 87 + * @initial_header_size: Maximum number of bytes that should be passed into write_init 73 88 * @state: returns an enum value of the FPGA's state 74 89 * @write_init: prepare the FPGA to receive configuration data 75 90 * @write: write count bytes of configuration data to the FPGA ··· 96 81 * called, so leaving them out is fine. 
97 82 */ 98 83 struct fpga_manager_ops { 84 + size_t initial_header_size; 99 85 enum fpga_mgr_states (*state)(struct fpga_manager *mgr); 100 - int (*write_init)(struct fpga_manager *mgr, u32 flags, 86 + int (*write_init)(struct fpga_manager *mgr, 87 + struct fpga_image_info *info, 101 88 const char *buf, size_t count); 102 89 int (*write)(struct fpga_manager *mgr, const char *buf, size_t count); 103 - int (*write_complete)(struct fpga_manager *mgr, u32 flags); 90 + int (*write_complete)(struct fpga_manager *mgr, 91 + struct fpga_image_info *info); 104 92 void (*fpga_remove)(struct fpga_manager *mgr); 105 93 }; 106 94 ··· 127 109 128 110 #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) 129 111 130 - int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, 112 + int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, 131 113 const char *buf, size_t count); 132 114 133 - int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags, 115 + int fpga_mgr_firmware_load(struct fpga_manager *mgr, 116 + struct fpga_image_info *info, 134 117 const char *image_name); 135 118 136 119 struct fpga_manager *of_fpga_mgr_get(struct device_node *node); 120 + 121 + struct fpga_manager *fpga_mgr_get(struct device *dev); 137 122 138 123 void fpga_mgr_put(struct fpga_manager *mgr); 139 124
+25 -28
include/linux/hyperv.h
··· 696 696 HV_FCOPY, 697 697 HV_BACKUP, 698 698 HV_DM, 699 - HV_UNKOWN, 699 + HV_UNKNOWN, 700 700 }; 701 701 702 702 struct vmbus_device { ··· 1119 1119 1120 1120 struct device_driver driver; 1121 1121 1122 + /* dynamic device GUID's */ 1123 + struct { 1124 + spinlock_t lock; 1125 + struct list_head list; 1126 + } dynids; 1127 + 1122 1128 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); 1123 1129 int (*remove)(struct hv_device *); 1124 1130 void (*shutdown)(struct hv_device *); ··· 1453 1447 1454 1448 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1455 1449 1450 + void vmbus_setevent(struct vmbus_channel *channel); 1456 1451 /* 1457 1452 * Negotiated version with the Host. 1458 1453 */ ··· 1486 1479 * there is room for the producer to send the pending packet. 1487 1480 */ 1488 1481 1489 - static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) 1482 + static inline void hv_signal_on_read(struct vmbus_channel *channel) 1490 1483 { 1491 1484 u32 cur_write_sz; 1492 1485 u32 pending_sz; 1486 + struct hv_ring_buffer_info *rbi = &channel->inbound; 1493 1487 1494 1488 /* 1495 1489 * Issue a full memory barrier before making the signaling decision. ··· 1508 1500 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); 1509 1501 /* If the other end is not blocked on write don't bother. 
*/ 1510 1502 if (pending_sz == 0) 1511 - return false; 1503 + return; 1512 1504 1513 1505 cur_write_sz = hv_get_bytes_to_write(rbi); 1514 1506 1515 1507 if (cur_write_sz >= pending_sz) 1516 - return true; 1508 + vmbus_setevent(channel); 1517 1509 1518 - return false; 1510 + return; 1519 1511 } 1520 1512 1521 1513 /* ··· 1527 1519 get_next_pkt_raw(struct vmbus_channel *channel) 1528 1520 { 1529 1521 struct hv_ring_buffer_info *ring_info = &channel->inbound; 1530 - u32 read_loc = ring_info->priv_read_index; 1522 + u32 priv_read_loc = ring_info->priv_read_index; 1531 1523 void *ring_buffer = hv_get_ring_buffer(ring_info); 1532 - struct vmpacket_descriptor *cur_desc; 1533 - u32 packetlen; 1534 1524 u32 dsize = ring_info->ring_datasize; 1535 - u32 delta = read_loc - ring_info->ring_buffer->read_index; 1525 + /* 1526 + * delta is the difference between what is available to read and 1527 + * what was already consumed in place. We commit read index after 1528 + * the whole batch is processed. 1529 + */ 1530 + u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? 1531 + priv_read_loc - ring_info->ring_buffer->read_index : 1532 + (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; 1536 1533 u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); 1537 1534 1538 1535 if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) 1539 1536 return NULL; 1540 1537 1541 - if ((read_loc + sizeof(*cur_desc)) > dsize) 1542 - return NULL; 1543 - 1544 - cur_desc = ring_buffer + read_loc; 1545 - packetlen = cur_desc->len8 << 3; 1546 - 1547 - /* 1548 - * If the packet under consideration is wrapping around, 1549 - * return failure. 
1550 - */ 1551 - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) 1552 - return NULL; 1553 - 1554 - return cur_desc; 1538 + return ring_buffer + priv_read_loc; 1555 1539 } 1556 1540 1557 1541 /* ··· 1555 1555 struct vmpacket_descriptor *desc) 1556 1556 { 1557 1557 struct hv_ring_buffer_info *ring_info = &channel->inbound; 1558 - u32 read_loc = ring_info->priv_read_index; 1559 1558 u32 packetlen = desc->len8 << 3; 1560 1559 u32 dsize = ring_info->ring_datasize; 1561 1560 1562 - if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) 1563 - BUG(); 1564 1561 /* 1565 1562 * Include the packet trailer. 1566 1563 */ 1567 1564 ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; 1565 + ring_info->priv_read_index %= dsize; 1568 1566 } 1569 1567 1570 1568 /* ··· 1587 1589 virt_rmb(); 1588 1590 ring_info->ring_buffer->read_index = ring_info->priv_read_index; 1589 1591 1590 - if (hv_need_to_signal_on_read(ring_info)) 1591 - vmbus_set_event(channel); 1592 + hv_signal_on_read(channel); 1592 1593 } 1593 1594 1594 1595
+30 -21
include/linux/mei_cl_bus.h
··· 8 8 struct mei_cl_device; 9 9 struct mei_device; 10 10 11 - typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, 12 - u32 events, void *context); 11 + typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); 13 12 14 13 /** 15 14 * struct mei_cl_device - MEI device handle ··· 23 24 * @me_cl: me client 24 25 * @cl: mei client 25 26 * @name: device name 26 - * @event_work: async work to execute event callback 27 - * @event_cb: Drivers register this callback to get asynchronous ME 28 - * events (e.g. Rx buffer pending) notifications. 29 - * @event_context: event callback run context 30 - * @events_mask: Events bit mask requested by driver. 31 - * @events: Events bitmask sent to the driver. 27 + * @rx_work: async work to execute Rx event callback 28 + * @rx_cb: Drivers register this callback to get asynchronous ME 29 + * Rx buffer pending notifications. 30 + * @notif_work: async work to execute FW notif event callback 31 + * @notif_cb: Drivers register this callback to get asynchronous ME 32 + * FW notification pending notifications. 32 33 * 33 34 * @do_match: whether device can be matched with a driver 34 35 * @is_added: device is already scanned ··· 43 44 struct mei_cl *cl; 44 45 char name[MEI_CL_NAME_SIZE]; 45 46 46 - struct work_struct event_work; 47 - mei_cldev_event_cb_t event_cb; 48 - void *event_context; 49 - unsigned long events_mask; 50 - unsigned long events; 47 + struct work_struct rx_work; 48 + mei_cldev_cb_t rx_cb; 49 + struct work_struct notif_work; 50 + mei_cldev_cb_t notif_cb; 51 51 52 52 unsigned int do_match:1; 53 53 unsigned int is_added:1; ··· 72 74 73 75 void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); 74 76 77 + /** 78 + * module_mei_cl_driver - Helper macro for registering mei cl driver 79 + * 80 + * @__mei_cldrv: mei_cl_driver structure 81 + * 82 + * Helper macro for mei cl drivers which do not do anything special in module 83 + * init/exit, for eliminating boilerplate code. 
84 + */ 85 + #define module_mei_cl_driver(__mei_cldrv) \ 86 + module_driver(__mei_cldrv, \ 87 + mei_cldev_driver_register,\ 88 + mei_cldev_driver_unregister) 89 + 75 90 ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); 76 - ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); 91 + ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); 92 + ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, 93 + size_t length); 77 94 78 - int mei_cldev_register_event_cb(struct mei_cl_device *cldev, 79 - unsigned long event_mask, 80 - mei_cldev_event_cb_t read_cb, void *context); 81 - 82 - #define MEI_CL_EVENT_RX 0 83 - #define MEI_CL_EVENT_TX 1 84 - #define MEI_CL_EVENT_NOTIF 2 95 + int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); 96 + int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, 97 + mei_cldev_cb_t notif_cb); 85 98 86 99 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); 87 100 u8 mei_cldev_ver(const struct mei_cl_device *cldev);
+7
include/linux/miscdevice.h
··· 72 72 extern void misc_deregister(struct miscdevice *misc); 73 73 74 74 /* 75 + * Helper macro for drivers that don't do anything special in the initcall. 76 + * This helps in eliminating boilerplate code. 77 + */ 78 + #define builtin_misc_device(__misc_device) \ 79 + builtin_driver(__misc_device, misc_register) 80 + 81 + /* 75 82 * Helper macro for drivers that don't do anything special in module init / exit 76 83 * call. This helps in eliminating boilerplate code. 77 84 */
+25
include/linux/of.h
··· 1266 1266 * Overlay support 1267 1267 */ 1268 1268 1269 + enum of_overlay_notify_action { 1270 + OF_OVERLAY_PRE_APPLY, 1271 + OF_OVERLAY_POST_APPLY, 1272 + OF_OVERLAY_PRE_REMOVE, 1273 + OF_OVERLAY_POST_REMOVE, 1274 + }; 1275 + 1276 + struct of_overlay_notify_data { 1277 + struct device_node *overlay; 1278 + struct device_node *target; 1279 + }; 1280 + 1269 1281 #ifdef CONFIG_OF_OVERLAY 1270 1282 1271 1283 /* ID based overlays; the API for external users */ 1272 1284 int of_overlay_create(struct device_node *tree); 1273 1285 int of_overlay_destroy(int id); 1274 1286 int of_overlay_destroy_all(void); 1287 + 1288 + int of_overlay_notifier_register(struct notifier_block *nb); 1289 + int of_overlay_notifier_unregister(struct notifier_block *nb); 1275 1290 1276 1291 #else 1277 1292 ··· 1303 1288 static inline int of_overlay_destroy_all(void) 1304 1289 { 1305 1290 return -ENOTSUPP; 1291 + } 1292 + 1293 + static inline int of_overlay_notifier_register(struct notifier_block *nb) 1294 + { 1295 + return 0; 1296 + } 1297 + 1298 + static inline int of_overlay_notifier_unregister(struct notifier_block *nb) 1299 + { 1300 + return 0; 1306 1301 } 1307 1302 1308 1303 #endif
-1
include/linux/vme.h
··· 113 113 int (*match)(struct vme_dev *); 114 114 int (*probe)(struct vme_dev *); 115 115 int (*remove)(struct vme_dev *); 116 - void (*shutdown)(void); 117 116 struct device_driver driver; 118 117 struct list_head devices; 119 118 };
+1
mm/memory.c
··· 4002 4002 4003 4003 return ret; 4004 4004 } 4005 + EXPORT_SYMBOL_GPL(access_process_vm); 4005 4006 4006 4007 /* 4007 4008 * Print the name of a VMA.
+1
mm/nommu.c
··· 1878 1878 mmput(mm); 1879 1879 return len; 1880 1880 } 1881 + EXPORT_SYMBOL_GPL(access_process_vm); 1881 1882 1882 1883 /** 1883 1884 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+1 -1
scripts/checkkconfigsymbols.py
··· 88 88 if args.commit and args.diff: 89 89 sys.exit("Please specify only one option at once.") 90 90 91 - if args.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", args.diff): 91 + if args.diff and not re.match(r"^[\w\-\.\^]+\.\.[\w\-\.\^]+$", args.diff): 92 92 sys.exit("Please specify valid input in the following format: " 93 93 "\'commit1..commit2\'") 94 94
+1 -2
tools/hv/Makefile
··· 1 1 # Makefile for Hyper-V tools 2 2 3 3 CC = $(CROSS_COMPILE)gcc 4 - PTHREAD_LIBS = -lpthread 5 4 WARNINGS = -Wall -Wextra 6 - CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS) 5 + CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS) 7 6 8 7 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include 9 8
-7
tools/hv/hv_fcopy_daemon.c
··· 18 18 19 19 20 20 #include <sys/types.h> 21 - #include <sys/socket.h> 22 - #include <sys/poll.h> 23 - #include <linux/types.h> 24 - #include <linux/kdev_t.h> 25 21 #include <stdio.h> 26 22 #include <stdlib.h> 27 23 #include <unistd.h> 28 - #include <string.h> 29 - #include <ctype.h> 30 24 #include <errno.h> 31 25 #include <linux/hyperv.h> 32 26 #include <syslog.h> 33 27 #include <sys/stat.h> 34 28 #include <fcntl.h> 35 - #include <dirent.h> 36 29 #include <getopt.h> 37 30 38 31 static int target_fd;
+9 -11
tools/hv/hv_kvp_daemon.c
··· 22 22 */ 23 23 24 24 25 - #include <sys/types.h> 26 - #include <sys/socket.h> 27 25 #include <sys/poll.h> 28 26 #include <sys/utsname.h> 29 27 #include <stdio.h> ··· 32 34 #include <errno.h> 33 35 #include <arpa/inet.h> 34 36 #include <linux/hyperv.h> 35 - #include <linux/netlink.h> 36 37 #include <ifaddrs.h> 37 38 #include <netdb.h> 38 39 #include <syslog.h> ··· 93 96 94 97 #define KVP_CONFIG_LOC "/var/lib/hyperv" 95 98 99 + #ifndef KVP_SCRIPTS_PATH 100 + #define KVP_SCRIPTS_PATH "/usr/libexec/hypervkvpd/" 101 + #endif 102 + 96 103 #define MAX_FILE_NAME 100 97 104 #define ENTRIES_PER_BLOCK 50 98 - 99 - #ifndef SOL_NETLINK 100 - #define SOL_NETLINK 270 101 - #endif 102 105 103 106 struct kvp_record { 104 107 char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; ··· 699 702 if (dir == NULL) 700 703 return NULL; 701 704 702 - snprintf(dev_id, sizeof(dev_id), kvp_net_dir); 705 + snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir); 703 706 q = dev_id + strlen(kvp_net_dir); 704 707 705 708 while ((entry = readdir(dir)) != NULL) { ··· 822 825 * . 823 826 */ 824 827 825 - sprintf(cmd, "%s", "hv_get_dns_info"); 828 + sprintf(cmd, KVP_SCRIPTS_PATH "%s", "hv_get_dns_info"); 826 829 827 830 /* 828 831 * Execute the command to gather DNS info. ··· 839 842 * Enabled: DHCP enabled. 840 843 */ 841 844 842 - sprintf(cmd, "%s %s", "hv_get_dhcp_info", if_name); 845 + sprintf(cmd, KVP_SCRIPTS_PATH "%s %s", "hv_get_dhcp_info", if_name); 843 846 844 847 file = popen(cmd, "r"); 845 848 if (file == NULL) ··· 1345 1348 * invoke the external script to do its magic. 1346 1349 */ 1347 1350 1348 - snprintf(cmd, sizeof(cmd), "%s %s", "hv_set_ifconfig", if_file); 1351 + snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s", 1352 + "hv_set_ifconfig", if_file); 1349 1353 if (system(cmd)) { 1350 1354 syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s", 1351 1355 cmd, errno, strerror(errno));