Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull ARM SoC driver updates from Olof Johansson:
"This contains platform-related driver updates for ARM and ARM64.

Highlights:

- ARM SCMI (System Control & Management Interface) driver cleanups

- Hisilicon support for LPC bus w/ ACPI

- Reset driver updates for several platforms, including Uniphier.

- Rockchip power domain bindings and hardware descriptions for
several SoCs.

- Tegra memory controller reset improvements"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (59 commits)
ARM: tegra: fix compile-testing PCI host driver
soc: rockchip: power-domain: add power domain support for px30
dt-bindings: power: add binding for px30 power domains
dt-bindings: power: add PX30 SoCs header for power-domain
soc: rockchip: power-domain: add power domain support for rk3228
dt-bindings: power: add binding for rk3228 power domains
dt-bindings: power: add RK3228 SoCs header for power-domain
soc: rockchip: power-domain: add power domain support for rk3128
dt-bindings: power: add binding for rk3128 power domains
dt-bindings: power: add RK3128 SoCs header for power-domain
soc: rockchip: power-domain: add power domain support for rk3036
dt-bindings: power: add binding for rk3036 power domains
dt-bindings: power: add RK3036 SoCs header for power-domain
dt-bindings: memory: tegra: Remove Tegra114 SATA and AFI reset definitions
memory: tegra: Remove Tegra114 SATA and AFI reset definitions
memory: tegra: Register SMMU after MC driver became ready
soc: mediatek: remove unneeded semicolon
soc: mediatek: add a fixed wait for SRAM stable
soc: mediatek: introduce a CAPS flag for scp_domain_data
soc: mediatek: reuse regmap_read_poll_timeout helpers
...

+1686 -925
+4 -16
Documentation/devicetree/bindings/power/pd-samsung.txt
··· 15 15 Optional Properties: 16 16 - label: Human readable string with domain name. Will be visible in userspace 17 17 to let user to distinguish between multiple domains in SoC. 18 - - clocks: List of clock handles. The parent clocks of the input clocks to the 19 - devices in this power domain are set to oscclk before power gating 20 - and restored back after powering on a domain. This is required for 21 - all domains which are powered on and off and not required for unused 22 - domains. 23 - - clock-names: The following clocks can be specified: 24 - - oscclk: Oscillator clock. 25 - - clkN: Input clocks to the devices in this power domain. These clocks 26 - will be reparented to oscclk before switching power domain off. 27 - Their original parent will be brought back after turning on 28 - the domain. Maximum of 4 clocks (N = 0 to 3) are supported. 29 - - asbN: Clocks required by asynchronous bridges (ASB) present in 30 - the power domain. These clock should be enabled during power 31 - domain on/off operations. 32 18 - power-domains: phandle pointing to the parent power domain, for more details 33 19 see Documentation/devicetree/bindings/power/power_domain.txt 20 + 21 + Deprecated Properties: 22 + - clocks 23 + - clock-names 34 24 35 25 Node of a device using power domains must have a power-domains property 36 26 defined with a phandle to respective power domain. ··· 37 47 mfc_pd: power-domain@10044060 { 38 48 compatible = "samsung,exynos4210-pd"; 39 49 reg = <0x10044060 0x20>; 40 - clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_USER_ACLK333>; 41 - clock-names = "oscclk", "clk0"; 42 50 #power-domain-cells = <0>; 43 51 label = "MFC"; 44 52 };
+12
Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
··· 5 5 6 6 Required properties for power domain controller: 7 7 - compatible: Should be one of the following. 8 + "rockchip,px30-power-controller" - for PX30 SoCs. 9 + "rockchip,rk3036-power-controller" - for RK3036 SoCs. 10 + "rockchip,rk3128-power-controller" - for RK3128 SoCs. 11 + "rockchip,rk3228-power-controller" - for RK3228 SoCs. 8 12 "rockchip,rk3288-power-controller" - for RK3288 SoCs. 9 13 "rockchip,rk3328-power-controller" - for RK3328 SoCs. 10 14 "rockchip,rk3366-power-controller" - for RK3366 SoCs. ··· 21 17 22 18 Required properties for power domain sub nodes: 23 19 - reg: index of the power domain, should use macros in: 20 + "include/dt-bindings/power/px30-power.h" - for PX30 type power domain. 21 + "include/dt-bindings/power/rk3036-power.h" - for RK3036 type power domain. 22 + "include/dt-bindings/power/rk3128-power.h" - for RK3128 type power domain. 23 + "include/dt-bindings/power/rk3228-power.h" - for RK3228 type power domain. 24 24 "include/dt-bindings/power/rk3288-power.h" - for RK3288 type power domain. 25 25 "include/dt-bindings/power/rk3328-power.h" - for RK3328 type power domain. 26 26 "include/dt-bindings/power/rk3366-power.h" - for RK3366 type power domain. ··· 101 93 containing a phandle to the power device node and an index specifying which 102 94 power domain to use. 103 95 The index should use macros in: 96 + "include/dt-bindings/power/px30-power.h" - for px30 type power domain. 97 + "include/dt-bindings/power/rk3036-power.h" - for rk3036 type power domain. 98 + "include/dt-bindings/power/rk3128-power.h" - for rk3128 type power domain. 99 + "include/dt-bindings/power/rk3228-power.h" - for rk3228 type power domain. 104 100 "include/dt-bindings/power/rk3288-power.h" - for rk3288 type power domain. 105 101 "include/dt-bindings/power/rk3328-power.h" - for rk3328 type power domain. 106 102 "include/dt-bindings/power/rk3366-power.h" - for rk3366 type power domain.
-1
drivers/bus/Kconfig
··· 33 33 bool "Support for ISA I/O space on HiSilicon Hip06/7" 34 34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST) 35 35 select INDIRECT_PIO 36 - select MFD_CORE if ACPI 37 36 help 38 37 Driver to enable I/O access to devices attached to the Low Pin 39 38 Count bus on the HiSilicon Hip06/7 SoC.
-2
drivers/bus/arm-cci.c
··· 371 371 [sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)), 372 372 [sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)), 373 373 [offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) ); 374 - 375 - unreachable(); 376 374 } 377 375 378 376 /**
+97 -62
drivers/bus/hisi_lpc.c
··· 11 11 #include <linux/delay.h> 12 12 #include <linux/io.h> 13 13 #include <linux/logic_pio.h> 14 - #include <linux/mfd/core.h> 15 14 #include <linux/module.h> 16 15 #include <linux/of.h> 17 16 #include <linux/of_address.h> 18 17 #include <linux/of_platform.h> 19 18 #include <linux/pci.h> 19 + #include <linux/serial_8250.h> 20 20 #include <linux/slab.h> 21 21 22 22 #define DRV_NAME "hisi-lpc" ··· 341 341 }; 342 342 343 343 #ifdef CONFIG_ACPI 344 - #define MFD_CHILD_NAME_PREFIX DRV_NAME"-" 345 - #define MFD_CHILD_NAME_LEN (ACPI_ID_LEN + sizeof(MFD_CHILD_NAME_PREFIX) - 1) 346 - 347 - struct hisi_lpc_mfd_cell { 348 - struct mfd_cell_acpi_match acpi_match; 349 - char name[MFD_CHILD_NAME_LEN]; 350 - char pnpid[ACPI_ID_LEN]; 351 - }; 352 - 353 344 static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev, 354 345 struct acpi_device *host, 355 346 struct resource *res) ··· 359 368 } 360 369 361 370 /* 362 - * hisi_lpc_acpi_set_io_res - set the resources for a child's MFD 371 + * hisi_lpc_acpi_set_io_res - set the resources for a child 363 372 * @child: the device node to be updated the I/O resource 364 373 * @hostdev: the device node associated with host controller 365 374 * @res: double pointer to be set to the address of translated resources ··· 443 452 return 0; 444 453 } 445 454 455 + static int hisi_lpc_acpi_remove_subdev(struct device *dev, void *unused) 456 + { 457 + platform_device_unregister(to_platform_device(dev)); 458 + return 0; 459 + } 460 + 461 + struct hisi_lpc_acpi_cell { 462 + const char *hid; 463 + const char *name; 464 + void *pdata; 465 + size_t pdata_size; 466 + }; 467 + 446 468 /* 447 469 * hisi_lpc_acpi_probe - probe children for ACPI FW 448 470 * @hostdev: LPC host device pointer 449 471 * 450 472 * Returns 0 when successful, and a negative value for failure. 451 473 * 452 - * Scan all child devices and create a per-device MFD with 453 - * logical PIO translated IO resources. 
474 + * Create a platform device per child, fixing up the resources 475 + * from bus addresses to Logical PIO addresses. 476 + * 454 477 */ 455 478 static int hisi_lpc_acpi_probe(struct device *hostdev) 456 479 { 457 480 struct acpi_device *adev = ACPI_COMPANION(hostdev); 458 - struct hisi_lpc_mfd_cell *hisi_lpc_mfd_cells; 459 - struct mfd_cell *mfd_cells; 460 481 struct acpi_device *child; 461 - int size, ret, count = 0, cell_num = 0; 482 + int ret; 462 483 463 - list_for_each_entry(child, &adev->children, node) 464 - cell_num++; 465 - 466 - /* allocate the mfd cell and companion ACPI info, one per child */ 467 - size = sizeof(*mfd_cells) + sizeof(*hisi_lpc_mfd_cells); 468 - mfd_cells = devm_kcalloc(hostdev, cell_num, size, GFP_KERNEL); 469 - if (!mfd_cells) 470 - return -ENOMEM; 471 - 472 - hisi_lpc_mfd_cells = (struct hisi_lpc_mfd_cell *)&mfd_cells[cell_num]; 473 484 /* Only consider the children of the host */ 474 485 list_for_each_entry(child, &adev->children, node) { 475 - struct mfd_cell *mfd_cell = &mfd_cells[count]; 476 - struct hisi_lpc_mfd_cell *hisi_lpc_mfd_cell = 477 - &hisi_lpc_mfd_cells[count]; 478 - struct mfd_cell_acpi_match *acpi_match = 479 - &hisi_lpc_mfd_cell->acpi_match; 480 - char *name = hisi_lpc_mfd_cell[count].name; 481 - char *pnpid = hisi_lpc_mfd_cell[count].pnpid; 482 - struct mfd_cell_acpi_match match = { 483 - .pnpid = pnpid, 486 + const char *hid = acpi_device_hid(child); 487 + const struct hisi_lpc_acpi_cell *cell; 488 + struct platform_device *pdev; 489 + const struct resource *res; 490 + bool found = false; 491 + int num_res; 492 + 493 + ret = hisi_lpc_acpi_set_io_res(&child->dev, &adev->dev, &res, 494 + &num_res); 495 + if (ret) { 496 + dev_warn(hostdev, "set resource fail (%d)\n", ret); 497 + goto fail; 498 + } 499 + 500 + cell = (struct hisi_lpc_acpi_cell []){ 501 + /* ipmi */ 502 + { 503 + .hid = "IPI0001", 504 + .name = "hisi-lpc-ipmi", 505 + }, 506 + /* 8250-compatible uart */ 507 + { 508 + .hid = "HISI1031", 509 + .name = 
"serial8250", 510 + .pdata = (struct plat_serial8250_port []) { 511 + { 512 + .iobase = res->start, 513 + .uartclk = 1843200, 514 + .iotype = UPIO_PORT, 515 + .flags = UPF_BOOT_AUTOCONF, 516 + }, 517 + {} 518 + }, 519 + .pdata_size = 2 * 520 + sizeof(struct plat_serial8250_port), 521 + }, 522 + {} 484 523 }; 485 524 486 - /* 487 - * For any instances of this host controller (Hip06 and Hip07 488 - * are the only chipsets), we would not have multiple slaves 489 - * with the same HID. And in any system we would have just one 490 - * controller active. So don't worrry about MFD name clashes. 491 - */ 492 - snprintf(name, MFD_CHILD_NAME_LEN, MFD_CHILD_NAME_PREFIX"%s", 493 - acpi_device_hid(child)); 494 - snprintf(pnpid, ACPI_ID_LEN, "%s", acpi_device_hid(child)); 495 - 496 - memcpy(acpi_match, &match, sizeof(*acpi_match)); 497 - mfd_cell->name = name; 498 - mfd_cell->acpi_match = acpi_match; 499 - 500 - ret = hisi_lpc_acpi_set_io_res(&child->dev, &adev->dev, 501 - &mfd_cell->resources, 502 - &mfd_cell->num_resources); 503 - if (ret) { 504 - dev_warn(&child->dev, "set resource fail (%d)\n", ret); 505 - return ret; 525 + for (; cell && cell->name; cell++) { 526 + if (!strcmp(cell->hid, hid)) { 527 + found = true; 528 + break; 529 + } 506 530 } 507 - count++; 508 - } 509 531 510 - ret = mfd_add_devices(hostdev, PLATFORM_DEVID_NONE, 511 - mfd_cells, cell_num, NULL, 0, NULL); 512 - if (ret) { 513 - dev_err(hostdev, "failed to add mfd cells (%d)\n", ret); 514 - return ret; 532 + if (!found) { 533 + dev_warn(hostdev, 534 + "could not find cell for child device (%s)\n", 535 + hid); 536 + ret = -ENODEV; 537 + goto fail; 538 + } 539 + 540 + pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO); 541 + if (!pdev) { 542 + ret = -ENOMEM; 543 + goto fail; 544 + } 545 + 546 + pdev->dev.parent = hostdev; 547 + ACPI_COMPANION_SET(&pdev->dev, child); 548 + 549 + ret = platform_device_add_resources(pdev, res, num_res); 550 + if (ret) 551 + goto fail; 552 + 553 + ret = 
platform_device_add_data(pdev, cell->pdata, 554 + cell->pdata_size); 555 + if (ret) 556 + goto fail; 557 + 558 + ret = platform_device_add(pdev); 559 + if (ret) 560 + goto fail; 561 + 562 + acpi_device_set_enumerated(child); 515 563 } 516 564 517 565 return 0; 566 + 567 + fail: 568 + device_for_each_child(hostdev, NULL, 569 + hisi_lpc_acpi_remove_subdev); 570 + return ret; 518 571 } 519 572 520 573 static const struct acpi_device_id hisi_lpc_acpi_match[] = {
+2 -2
drivers/cpufreq/scmi-cpufreq.c
··· 117 117 return -ENODEV; 118 118 } 119 119 120 - ret = handle->perf_ops->add_opps_to_device(handle, cpu_dev); 120 + ret = handle->perf_ops->device_opps_add(handle, cpu_dev); 121 121 if (ret) { 122 122 dev_warn(cpu_dev, "failed to add opps to the device\n"); 123 123 return ret; ··· 164 164 /* SCMI allows DVFS request for any domain from any CPU */ 165 165 policy->dvfs_possible_from_any_cpu = true; 166 166 167 - latency = handle->perf_ops->get_transition_latency(handle, cpu_dev); 167 + latency = handle->perf_ops->transition_latency_get(handle, cpu_dev); 168 168 if (!latency) 169 169 latency = CPUFREQ_ETERNAL; 170 170
+24 -19
drivers/firmware/arm_scmi/base.c
··· 26 26 * scmi_base_attributes_get() - gets the implementation details 27 27 * that are associated with the base protocol. 28 28 * 29 - * @handle - SCMI entity handle 29 + * @handle: SCMI entity handle 30 30 * 31 31 * Return: 0 on success, else appropriate SCMI error. 32 32 */ ··· 37 37 struct scmi_msg_resp_base_attributes *attr_info; 38 38 struct scmi_revision_info *rev = handle->version; 39 39 40 - ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, 40 + ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES, 41 41 SCMI_PROTOCOL_BASE, 0, sizeof(*attr_info), &t); 42 42 if (ret) 43 43 return ret; ··· 49 49 rev->num_agents = attr_info->num_agents; 50 50 } 51 51 52 - scmi_one_xfer_put(handle, t); 52 + scmi_xfer_put(handle, t); 53 + 53 54 return ret; 54 55 } 55 56 56 57 /** 57 58 * scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string. 58 59 * 59 - * @handle - SCMI entity handle 60 - * @sub_vendor - specify true if sub-vendor ID is needed 60 + * @handle: SCMI entity handle 61 + * @sub_vendor: specify true if sub-vendor ID is needed 61 62 * 62 63 * Return: 0 on success, else appropriate SCMI error. 63 64 */ ··· 81 80 size = ARRAY_SIZE(rev->vendor_id); 82 81 } 83 82 84 - ret = scmi_one_xfer_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t); 83 + ret = scmi_xfer_get_init(handle, cmd, SCMI_PROTOCOL_BASE, 0, size, &t); 85 84 if (ret) 86 85 return ret; 87 86 ··· 89 88 if (!ret) 90 89 memcpy(vendor_id, t->rx.buf, size); 91 90 92 - scmi_one_xfer_put(handle, t); 91 + scmi_xfer_put(handle, t); 92 + 93 93 return ret; 94 94 } 95 95 ··· 99 97 * implementation 32-bit version. The format of the version number is 100 98 * vendor-specific 101 99 * 102 - * @handle - SCMI entity handle 100 + * @handle: SCMI entity handle 103 101 * 104 102 * Return: 0 on success, else appropriate SCMI error. 
105 103 */ ··· 111 109 struct scmi_xfer *t; 112 110 struct scmi_revision_info *rev = handle->version; 113 111 114 - ret = scmi_one_xfer_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION, 112 + ret = scmi_xfer_get_init(handle, BASE_DISCOVER_IMPLEMENT_VERSION, 115 113 SCMI_PROTOCOL_BASE, 0, sizeof(*impl_ver), &t); 116 114 if (ret) 117 115 return ret; ··· 122 120 rev->impl_ver = le32_to_cpu(*impl_ver); 123 121 } 124 122 125 - scmi_one_xfer_put(handle, t); 123 + scmi_xfer_put(handle, t); 124 + 126 125 return ret; 127 126 } 128 127 ··· 131 128 * scmi_base_implementation_list_get() - gets the list of protocols it is 132 129 * OSPM is allowed to access 133 130 * 134 - * @handle - SCMI entity handle 135 - * @protocols_imp - pointer to hold the list of protocol identifiers 131 + * @handle: SCMI entity handle 132 + * @protocols_imp: pointer to hold the list of protocol identifiers 136 133 * 137 134 * Return: 0 on success, else appropriate SCMI error. 138 135 */ ··· 146 143 u32 tot_num_ret = 0, loop_num_ret; 147 144 struct device *dev = handle->dev; 148 145 149 - ret = scmi_one_xfer_init(handle, BASE_DISCOVER_LIST_PROTOCOLS, 146 + ret = scmi_xfer_get_init(handle, BASE_DISCOVER_LIST_PROTOCOLS, 150 147 SCMI_PROTOCOL_BASE, sizeof(*num_skip), 0, &t); 151 148 if (ret) 152 149 return ret; ··· 175 172 tot_num_ret += loop_num_ret; 176 173 } while (loop_num_ret); 177 174 178 - scmi_one_xfer_put(handle, t); 175 + scmi_xfer_put(handle, t); 176 + 179 177 return ret; 180 178 } 181 179 182 180 /** 183 181 * scmi_base_discover_agent_get() - discover the name of an agent 184 182 * 185 - * @handle - SCMI entity handle 186 - * @id - Agent identifier 187 - * @name - Agent identifier ASCII string 183 + * @handle: SCMI entity handle 184 + * @id: Agent identifier 185 + * @name: Agent identifier ASCII string 188 186 * 189 187 * An agent id of 0 is reserved to identify the platform itself. 
190 188 * Generally operating system is represented as "OSPM" ··· 198 194 int ret; 199 195 struct scmi_xfer *t; 200 196 201 - ret = scmi_one_xfer_init(handle, BASE_DISCOVER_AGENT, 197 + ret = scmi_xfer_get_init(handle, BASE_DISCOVER_AGENT, 202 198 SCMI_PROTOCOL_BASE, sizeof(__le32), 203 199 SCMI_MAX_STR_SIZE, &t); 204 200 if (ret) ··· 210 206 if (!ret) 211 207 memcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE); 212 208 213 - scmi_one_xfer_put(handle, t); 209 + scmi_xfer_put(handle, t); 210 + 214 211 return ret; 215 212 } 216 213
+12 -10
drivers/firmware/arm_scmi/bus.c
··· 125 125 int id, retval; 126 126 struct scmi_device *scmi_dev; 127 127 128 - id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); 129 - if (id < 0) 130 - return NULL; 131 - 132 128 scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL); 133 129 if (!scmi_dev) 134 - goto no_mem; 130 + return NULL; 131 + 132 + id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); 133 + if (id < 0) 134 + goto free_mem; 135 135 136 136 scmi_dev->id = id; 137 137 scmi_dev->protocol_id = protocol; ··· 141 141 dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); 142 142 143 143 retval = device_register(&scmi_dev->dev); 144 - if (!retval) 145 - return scmi_dev; 144 + if (retval) 145 + goto put_dev; 146 146 147 + return scmi_dev; 148 + put_dev: 147 149 put_device(&scmi_dev->dev); 148 - kfree(scmi_dev); 149 - no_mem: 150 150 ida_simple_remove(&scmi_bus_id, id); 151 + free_mem: 152 + kfree(scmi_dev); 151 153 return NULL; 152 154 } 153 155 ··· 173 171 spin_lock(&protocol_lock); 174 172 ret = idr_alloc(&scmi_protocols, fn, protocol_id, protocol_id + 1, 175 173 GFP_ATOMIC); 174 + spin_unlock(&protocol_lock); 176 175 if (ret != protocol_id) 177 176 pr_err("unable to allocate SCMI idr slot, err %d\n", ret); 178 - spin_unlock(&protocol_lock); 179 177 180 178 return ret; 181 179 }
+12 -12
drivers/firmware/arm_scmi/clock.c
··· 77 77 struct scmi_xfer *t; 78 78 struct scmi_msg_resp_clock_protocol_attributes *attr; 79 79 80 - ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, 80 + ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES, 81 81 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t); 82 82 if (ret) 83 83 return ret; ··· 90 90 ci->max_async_req = attr->max_async_req; 91 91 } 92 92 93 - scmi_one_xfer_put(handle, t); 93 + scmi_xfer_put(handle, t); 94 94 return ret; 95 95 } 96 96 ··· 101 101 struct scmi_xfer *t; 102 102 struct scmi_msg_resp_clock_attributes *attr; 103 103 104 - ret = scmi_one_xfer_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK, 104 + ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK, 105 105 sizeof(clk_id), sizeof(*attr), &t); 106 106 if (ret) 107 107 return ret; ··· 115 115 else 116 116 clk->name[0] = '\0'; 117 117 118 - scmi_one_xfer_put(handle, t); 118 + scmi_xfer_put(handle, t); 119 119 return ret; 120 120 } 121 121 ··· 132 132 struct scmi_msg_clock_describe_rates *clk_desc; 133 133 struct scmi_msg_resp_clock_describe_rates *rlist; 134 134 135 - ret = scmi_one_xfer_init(handle, CLOCK_DESCRIBE_RATES, 135 + ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES, 136 136 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t); 137 137 if (ret) 138 138 return ret; ··· 186 186 clk->list.num_rates = tot_rate_cnt; 187 187 188 188 err: 189 - scmi_one_xfer_put(handle, t); 189 + scmi_xfer_put(handle, t); 190 190 return ret; 191 191 } 192 192 ··· 196 196 int ret; 197 197 struct scmi_xfer *t; 198 198 199 - ret = scmi_one_xfer_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK, 199 + ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK, 200 200 sizeof(__le32), sizeof(u64), &t); 201 201 if (ret) 202 202 return ret; ··· 211 211 *value |= (u64)le32_to_cpu(*(pval + 1)) << 32; 212 212 } 213 213 214 - scmi_one_xfer_put(handle, t); 214 + scmi_xfer_put(handle, t); 215 215 return ret; 216 216 } 217 217 ··· 222 222 struct scmi_xfer *t; 223 223 struct 
scmi_clock_set_rate *cfg; 224 224 225 - ret = scmi_one_xfer_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK, 225 + ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK, 226 226 sizeof(*cfg), 0, &t); 227 227 if (ret) 228 228 return ret; ··· 235 235 236 236 ret = scmi_do_xfer(handle, t); 237 237 238 - scmi_one_xfer_put(handle, t); 238 + scmi_xfer_put(handle, t); 239 239 return ret; 240 240 } 241 241 ··· 246 246 struct scmi_xfer *t; 247 247 struct scmi_clock_set_config *cfg; 248 248 249 - ret = scmi_one_xfer_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK, 249 + ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK, 250 250 sizeof(*cfg), 0, &t); 251 251 if (ret) 252 252 return ret; ··· 257 257 258 258 ret = scmi_do_xfer(handle, t); 259 259 260 - scmi_one_xfer_put(handle, t); 260 + scmi_xfer_put(handle, t); 261 261 return ret; 262 262 } 263 263
+12 -10
drivers/firmware/arm_scmi/common.h
··· 7 7 * Copyright (C) 2018 ARM Ltd. 8 8 */ 9 9 10 + #include <linux/bitfield.h> 10 11 #include <linux/completion.h> 11 12 #include <linux/device.h> 12 13 #include <linux/errno.h> ··· 15 14 #include <linux/scmi_protocol.h> 16 15 #include <linux/types.h> 17 16 18 - #define PROTOCOL_REV_MINOR_BITS 16 19 - #define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1) 20 - #define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS) 21 - #define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK) 17 + #define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) 18 + #define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) 19 + #define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))) 20 + #define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))) 22 21 #define MAX_PROTOCOLS_IMP 16 23 22 #define MAX_OPPS 16 24 23 ··· 51 50 * @id: The identifier of the command being sent 52 51 * @protocol_id: The identifier of the protocol used to send @id command 53 52 * @seq: The token to identify the message. when a message/command returns, 54 - * the platform returns the whole message header unmodified including 55 - * the token. 53 + * the platform returns the whole message header unmodified including 54 + * the token 55 + * @status: Status of the transfer once it's complete 56 + * @poll_completion: Indicate if the transfer needs to be polled for 57 + * completion or interrupt mode is used 56 58 */ 57 59 struct scmi_msg_hdr { 58 60 u8 id; ··· 86 82 * buffer for the rx path as we use for the tx path. 
87 83 * @done: completion event 88 84 */ 89 - 90 85 struct scmi_xfer { 91 - void *con_priv; 92 86 struct scmi_msg_hdr hdr; 93 87 struct scmi_msg tx; 94 88 struct scmi_msg rx; 95 89 struct completion done; 96 90 }; 97 91 98 - void scmi_one_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); 92 + void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); 99 93 int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer); 100 - int scmi_one_xfer_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, 94 + int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, 101 95 size_t tx_size, size_t rx_size, struct scmi_xfer **p); 102 96 int scmi_handle_put(const struct scmi_handle *handle); 103 97 struct scmi_handle *scmi_handle_get(struct device *dev);
+54 -55
drivers/firmware/arm_scmi/driver.c
··· 29 29 30 30 #include "common.h" 31 31 32 - #define MSG_ID_SHIFT 0 33 - #define MSG_ID_MASK 0xff 34 - #define MSG_TYPE_SHIFT 8 35 - #define MSG_TYPE_MASK 0x3 36 - #define MSG_PROTOCOL_ID_SHIFT 10 37 - #define MSG_PROTOCOL_ID_MASK 0xff 38 - #define MSG_TOKEN_ID_SHIFT 18 39 - #define MSG_TOKEN_ID_MASK 0x3ff 40 - #define MSG_XTRACT_TOKEN(header) \ 41 - (((header) >> MSG_TOKEN_ID_SHIFT) & MSG_TOKEN_ID_MASK) 32 + #define MSG_ID_MASK GENMASK(7, 0) 33 + #define MSG_TYPE_MASK GENMASK(9, 8) 34 + #define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) 35 + #define MSG_TOKEN_ID_MASK GENMASK(27, 18) 36 + #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) 37 + #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) 42 38 43 39 enum scmi_error_codes { 44 40 SCMI_SUCCESS = 0, /* Success */ ··· 51 55 SCMI_ERR_MAX 52 56 }; 53 57 54 - /* List of all SCMI devices active in system */ 58 + /* List of all SCMI devices active in system */ 55 59 static LIST_HEAD(scmi_list); 56 60 /* Protection for the entire list */ 57 61 static DEFINE_MUTEX(scmi_list_mutex); ··· 68 72 struct scmi_xfers_info { 69 73 struct scmi_xfer *xfer_block; 70 74 unsigned long *xfer_alloc_table; 71 - /* protect transfer allocation */ 72 75 spinlock_t xfer_lock; 73 76 }; 74 77 ··· 93 98 * @payload: Transmit/Receive mailbox channel payload area 94 99 * @dev: Reference to device in the SCMI hierarchy corresponding to this 95 100 * channel 101 + * @handle: Pointer to SCMI entity handle 96 102 */ 97 103 struct scmi_chan_info { 98 104 struct mbox_client cl; ··· 104 108 }; 105 109 106 110 /** 107 - * struct scmi_info - Structure representing a SCMI instance 111 + * struct scmi_info - Structure representing a SCMI instance 108 112 * 109 113 * @dev: Device pointer 110 114 * @desc: SoC description for this instance ··· 113 117 * implementation version and (sub-)vendor identification. 
114 118 * @minfo: Message info 115 119 * @tx_idr: IDR object to map protocol id to channel info pointer 116 - * @protocols_imp: list of protocols implemented, currently maximum of 120 + * @protocols_imp: List of protocols implemented, currently maximum of 117 121 * MAX_PROTOCOLS_IMP elements allocated by the base protocol 118 - * @node: list head 122 + * @node: List head 119 123 * @users: Number of users of this instance 120 124 */ 121 125 struct scmi_info { ··· 221 225 222 226 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header)); 223 227 224 - /* 225 - * Are we even expecting this? 226 - */ 228 + /* Are we even expecting this? */ 227 229 if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { 228 230 dev_err(dev, "message for %d is not expected!\n", xfer_id); 229 231 return; ··· 246 252 * 247 253 * @hdr: pointer to header containing all the information on message id, 248 254 * protocol id and sequence id. 255 + * 256 + * Return: 32-bit packed command header to be sent to the platform. 249 257 */ 250 258 static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) 251 259 { 252 - return ((hdr->id & MSG_ID_MASK) << MSG_ID_SHIFT) | 253 - ((hdr->seq & MSG_TOKEN_ID_MASK) << MSG_TOKEN_ID_SHIFT) | 254 - ((hdr->protocol_id & MSG_PROTOCOL_ID_MASK) << MSG_PROTOCOL_ID_SHIFT); 260 + return FIELD_PREP(MSG_ID_MASK, hdr->id) | 261 + FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) | 262 + FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); 255 263 } 256 264 257 265 /** ··· 282 286 } 283 287 284 288 /** 285 - * scmi_one_xfer_get() - Allocate one message 289 + * scmi_xfer_get() - Allocate one message 286 290 * 287 - * @handle: SCMI entity handle 291 + * @handle: Pointer to SCMI entity handle 288 292 * 289 293 * Helper function which is used by various command functions that are 290 294 * exposed to clients of this driver for allocating a message traffic event. ··· 295 299 * 296 300 * Return: 0 if all went fine, else corresponding error. 
297 301 */ 298 - static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle) 302 + static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle) 299 303 { 300 304 u16 xfer_id; 301 305 struct scmi_xfer *xfer; ··· 324 328 } 325 329 326 330 /** 327 - * scmi_one_xfer_put() - Release a message 331 + * scmi_xfer_put() - Release a message 328 332 * 329 - * @minfo: transfer info pointer 330 - * @xfer: message that was reserved by scmi_one_xfer_get 333 + * @handle: Pointer to SCMI entity handle 334 + * @xfer: message that was reserved by scmi_xfer_get 331 335 * 332 336 * This holds a spinlock to maintain integrity of internal data structures. 333 337 */ 334 - void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) 338 + void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) 335 339 { 336 340 unsigned long flags; 337 341 struct scmi_info *info = handle_to_scmi_info(handle); ··· 374 378 /** 375 379 * scmi_do_xfer() - Do one transfer 376 380 * 377 - * @info: Pointer to SCMI entity information 381 + * @handle: Pointer to SCMI entity handle 378 382 * @xfer: Transfer to initiate and wait for response 379 383 * 380 384 * Return: -ETIMEDOUT in case of no response, if transmit error, 381 - * return corresponding error, else if all goes well, 382 - * return 0. 385 + * return corresponding error, else if all goes well, 386 + * return 0. 
383 387 */ 384 388 int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) 385 389 { ··· 436 440 } 437 441 438 442 /** 439 - * scmi_one_xfer_init() - Allocate and initialise one message 443 + * scmi_xfer_get_init() - Allocate and initialise one message 440 444 * 441 - * @handle: SCMI entity handle 445 + * @handle: Pointer to SCMI entity handle 442 446 * @msg_id: Message identifier 443 - * @msg_prot_id: Protocol identifier for the message 447 + * @prot_id: Protocol identifier for the message 444 448 * @tx_size: transmit message size 445 449 * @rx_size: receive message size 446 450 * @p: pointer to the allocated and initialised message 447 451 * 448 - * This function allocates the message using @scmi_one_xfer_get and 452 + * This function allocates the message using @scmi_xfer_get and 449 453 * initialise the header. 450 454 * 451 455 * Return: 0 if all went fine with @p pointing to message, else 452 456 * corresponding error. 453 457 */ 454 - int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id, 458 + int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id, 455 459 size_t tx_size, size_t rx_size, struct scmi_xfer **p) 456 460 { 457 461 int ret; ··· 464 468 tx_size > info->desc->max_msg_size) 465 469 return -ERANGE; 466 470 467 - xfer = scmi_one_xfer_get(handle); 471 + xfer = scmi_xfer_get(handle); 468 472 if (IS_ERR(xfer)) { 469 473 ret = PTR_ERR(xfer); 470 474 dev_err(dev, "failed to get free message slot(%d)\n", ret); ··· 478 482 xfer->hdr.poll_completion = false; 479 483 480 484 *p = xfer; 485 + 481 486 return 0; 482 487 } 483 488 484 489 /** 485 490 * scmi_version_get() - command to get the revision of the SCMI entity 486 491 * 487 - * @handle: Handle to SCMI entity information 492 + * @handle: Pointer to SCMI entity handle 493 + * @protocol: Protocol identifier for the message 494 + * @version: Holds returned version of protocol. 
488 495 * 489 496 * Updates the SCMI information in the internal data structure. 490 497 * ··· 500 501 __le32 *rev_info; 501 502 struct scmi_xfer *t; 502 503 503 - ret = scmi_one_xfer_init(handle, PROTOCOL_VERSION, protocol, 0, 504 + ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0, 504 505 sizeof(*version), &t); 505 506 if (ret) 506 507 return ret; ··· 511 512 *version = le32_to_cpu(*rev_info); 512 513 } 513 514 514 - scmi_one_xfer_put(handle, t); 515 + scmi_xfer_put(handle, t); 515 516 return ret; 516 517 } 517 518 ··· 539 540 } 540 541 541 542 /** 542 - * scmi_handle_get() - Get the SCMI handle for a device 543 + * scmi_handle_get() - Get the SCMI handle for a device 543 544 * 544 545 * @dev: pointer to device for which we want SCMI handle 545 546 * 546 547 * NOTE: The function does not track individual clients of the framework 547 - * and is expected to be maintained by caller of SCMI protocol library. 548 + * and is expected to be maintained by caller of SCMI protocol library. 548 549 * scmi_handle_put must be balanced with successful scmi_handle_get 549 550 * 550 551 * Return: pointer to handle if successful, NULL on error ··· 575 576 * @handle: handle acquired by scmi_handle_get 576 577 * 577 578 * NOTE: The function does not track individual clients of the framework 578 - * and is expected to be maintained by caller of SCMI protocol library. 579 + * and is expected to be maintained by caller of SCMI protocol library. 
579 580 * scmi_handle_put must be balanced with successful scmi_handle_get 580 581 * 581 582 * Return: 0 is successfully released ··· 598 599 } 599 600 600 601 static const struct scmi_desc scmi_generic_desc = { 601 - .max_rx_timeout_ms = 30, /* we may increase this if required */ 602 + .max_rx_timeout_ms = 30, /* We may increase this if required */ 602 603 .max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */ 603 604 .max_msg_size = 128, 604 605 }; ··· 620 621 struct scmi_xfers_info *info = &sinfo->minfo; 621 622 622 623 /* Pre-allocated messages, no more than what hdr.seq can support */ 623 - if (WARN_ON(desc->max_msg >= (MSG_TOKEN_ID_MASK + 1))) { 624 - dev_err(dev, "Maximum message of %d exceeds supported %d\n", 625 - desc->max_msg, MSG_TOKEN_ID_MASK + 1); 624 + if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { 625 + dev_err(dev, "Maximum message of %d exceeds supported %ld\n", 626 + desc->max_msg, MSG_TOKEN_MAX); 626 627 return -EINVAL; 627 628 } 628 629 ··· 635 636 sizeof(long), GFP_KERNEL); 636 637 if (!info->xfer_alloc_table) 637 638 return -ENOMEM; 638 - 639 - bitmap_zero(info->xfer_alloc_table, desc->max_msg); 640 639 641 640 /* Pre-initialize the buffer pointer to pre-allocated buffers */ 642 641 for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) { ··· 687 690 list_del(&info->node); 688 691 mutex_unlock(&scmi_list_mutex); 689 692 690 - if (!ret) { 691 - /* Safe to free channels since no more users */ 692 - ret = idr_for_each(idr, scmi_mbox_free_channel, idr); 693 - idr_destroy(&info->tx_idr); 694 - } 693 + if (ret) 694 + return ret; 695 + 696 + /* Safe to free channels since no more users */ 697 + ret = idr_for_each(idr, scmi_mbox_free_channel, idr); 698 + idr_destroy(&info->tx_idr); 695 699 696 700 return ret; 697 701 } ··· 839 841 if (of_property_read_u32(child, "reg", &prot_id)) 840 842 continue; 841 843 842 - prot_id &= MSG_PROTOCOL_ID_MASK; 844 + if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id)) 845 + dev_err(dev, "Out of range protocol 
%d\n", prot_id); 843 846 844 847 if (!scmi_is_protocol_implemented(handle, prot_id)) { 845 848 dev_err(dev, "SCMI protocol %d not implemented\n",
+19 -19
drivers/firmware/arm_scmi/perf.c
··· 115 115 struct scmi_xfer *t; 116 116 struct scmi_msg_resp_perf_attributes *attr; 117 117 118 - ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, 118 + ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES, 119 119 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t); 120 120 if (ret) 121 121 return ret; ··· 133 133 pi->stats_size = le32_to_cpu(attr->stats_size); 134 134 } 135 135 136 - scmi_one_xfer_put(handle, t); 136 + scmi_xfer_put(handle, t); 137 137 return ret; 138 138 } 139 139 ··· 145 145 struct scmi_xfer *t; 146 146 struct scmi_msg_resp_perf_domain_attributes *attr; 147 147 148 - ret = scmi_one_xfer_init(handle, PERF_DOMAIN_ATTRIBUTES, 148 + ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES, 149 149 SCMI_PROTOCOL_PERF, sizeof(domain), 150 150 sizeof(*attr), &t); 151 151 if (ret) ··· 171 171 memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); 172 172 } 173 173 174 - scmi_one_xfer_put(handle, t); 174 + scmi_xfer_put(handle, t); 175 175 return ret; 176 176 } 177 177 ··· 194 194 struct scmi_msg_perf_describe_levels *dom_info; 195 195 struct scmi_msg_resp_perf_describe_levels *level_info; 196 196 197 - ret = scmi_one_xfer_init(handle, PERF_DESCRIBE_LEVELS, 197 + ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS, 198 198 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t); 199 199 if (ret) 200 200 return ret; ··· 237 237 } while (num_returned && num_remaining); 238 238 239 239 perf_dom->opp_count = tot_opp_cnt; 240 - scmi_one_xfer_put(handle, t); 240 + scmi_xfer_put(handle, t); 241 241 242 242 sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL); 243 243 return ret; ··· 250 250 struct scmi_xfer *t; 251 251 struct scmi_perf_set_limits *limits; 252 252 253 - ret = scmi_one_xfer_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF, 253 + ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF, 254 254 sizeof(*limits), 0, &t); 255 255 if (ret) 256 256 return ret; ··· 262 262 263 263 ret = scmi_do_xfer(handle, t); 264 264 265 - 
scmi_one_xfer_put(handle, t); 265 + scmi_xfer_put(handle, t); 266 266 return ret; 267 267 } 268 268 ··· 273 273 struct scmi_xfer *t; 274 274 struct scmi_perf_get_limits *limits; 275 275 276 - ret = scmi_one_xfer_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF, 276 + ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF, 277 277 sizeof(__le32), 0, &t); 278 278 if (ret) 279 279 return ret; ··· 288 288 *min_perf = le32_to_cpu(limits->min_level); 289 289 } 290 290 291 - scmi_one_xfer_put(handle, t); 291 + scmi_xfer_put(handle, t); 292 292 return ret; 293 293 } 294 294 ··· 299 299 struct scmi_xfer *t; 300 300 struct scmi_perf_set_level *lvl; 301 301 302 - ret = scmi_one_xfer_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF, 302 + ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF, 303 303 sizeof(*lvl), 0, &t); 304 304 if (ret) 305 305 return ret; ··· 311 311 312 312 ret = scmi_do_xfer(handle, t); 313 313 314 - scmi_one_xfer_put(handle, t); 314 + scmi_xfer_put(handle, t); 315 315 return ret; 316 316 } 317 317 ··· 321 321 int ret; 322 322 struct scmi_xfer *t; 323 323 324 - ret = scmi_one_xfer_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF, 324 + ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF, 325 325 sizeof(u32), sizeof(u32), &t); 326 326 if (ret) 327 327 return ret; ··· 333 333 if (!ret) 334 334 *level = le32_to_cpu(*(__le32 *)t->rx.buf); 335 335 336 - scmi_one_xfer_put(handle, t); 336 + scmi_xfer_put(handle, t); 337 337 return ret; 338 338 } 339 339 ··· 349 349 return clkspec.args[0]; 350 350 } 351 351 352 - static int scmi_dvfs_add_opps_to_device(const struct scmi_handle *handle, 353 - struct device *dev) 352 + static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle, 353 + struct device *dev) 354 354 { 355 355 int idx, ret, domain; 356 356 unsigned long freq; ··· 383 383 return 0; 384 384 } 385 385 386 - static int scmi_dvfs_get_transition_latency(const struct scmi_handle *handle, 386 + static int 
scmi_dvfs_transition_latency_get(const struct scmi_handle *handle, 387 387 struct device *dev) 388 388 { 389 389 struct perf_dom_info *dom; ··· 432 432 .level_set = scmi_perf_level_set, 433 433 .level_get = scmi_perf_level_get, 434 434 .device_domain_id = scmi_dev_domain_id, 435 - .get_transition_latency = scmi_dvfs_get_transition_latency, 436 - .add_opps_to_device = scmi_dvfs_add_opps_to_device, 435 + .transition_latency_get = scmi_dvfs_transition_latency_get, 436 + .device_opps_add = scmi_dvfs_device_opps_add, 437 437 .freq_set = scmi_dvfs_freq_set, 438 438 .freq_get = scmi_dvfs_freq_get, 439 439 };
+8 -8
drivers/firmware/arm_scmi/power.c
··· 63 63 struct scmi_xfer *t; 64 64 struct scmi_msg_resp_power_attributes *attr; 65 65 66 - ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, 66 + ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES, 67 67 SCMI_PROTOCOL_POWER, 0, sizeof(*attr), &t); 68 68 if (ret) 69 69 return ret; ··· 78 78 pi->stats_size = le32_to_cpu(attr->stats_size); 79 79 } 80 80 81 - scmi_one_xfer_put(handle, t); 81 + scmi_xfer_put(handle, t); 82 82 return ret; 83 83 } 84 84 ··· 90 90 struct scmi_xfer *t; 91 91 struct scmi_msg_resp_power_domain_attributes *attr; 92 92 93 - ret = scmi_one_xfer_init(handle, POWER_DOMAIN_ATTRIBUTES, 93 + ret = scmi_xfer_get_init(handle, POWER_DOMAIN_ATTRIBUTES, 94 94 SCMI_PROTOCOL_POWER, sizeof(domain), 95 95 sizeof(*attr), &t); 96 96 if (ret) ··· 109 109 memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); 110 110 } 111 111 112 - scmi_one_xfer_put(handle, t); 112 + scmi_xfer_put(handle, t); 113 113 return ret; 114 114 } 115 115 ··· 120 120 struct scmi_xfer *t; 121 121 struct scmi_power_set_state *st; 122 122 123 - ret = scmi_one_xfer_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER, 123 + ret = scmi_xfer_get_init(handle, POWER_STATE_SET, SCMI_PROTOCOL_POWER, 124 124 sizeof(*st), 0, &t); 125 125 if (ret) 126 126 return ret; ··· 132 132 133 133 ret = scmi_do_xfer(handle, t); 134 134 135 - scmi_one_xfer_put(handle, t); 135 + scmi_xfer_put(handle, t); 136 136 return ret; 137 137 } 138 138 ··· 142 142 int ret; 143 143 struct scmi_xfer *t; 144 144 145 - ret = scmi_one_xfer_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER, 145 + ret = scmi_xfer_get_init(handle, POWER_STATE_GET, SCMI_PROTOCOL_POWER, 146 146 sizeof(u32), sizeof(u32), &t); 147 147 if (ret) 148 148 return ret; ··· 153 153 if (!ret) 154 154 *state = le32_to_cpu(*(__le32 *)t->rx.buf); 155 155 156 - scmi_one_xfer_put(handle, t); 156 + scmi_xfer_put(handle, t); 157 157 return ret; 158 158 } 159 159
+10 -10
drivers/firmware/arm_scmi/sensors.c
··· 79 79 struct scmi_xfer *t; 80 80 struct scmi_msg_resp_sensor_attributes *attr; 81 81 82 - ret = scmi_one_xfer_init(handle, PROTOCOL_ATTRIBUTES, 82 + ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES, 83 83 SCMI_PROTOCOL_SENSOR, 0, sizeof(*attr), &t); 84 84 if (ret) 85 85 return ret; ··· 95 95 si->reg_size = le32_to_cpu(attr->reg_size); 96 96 } 97 97 98 - scmi_one_xfer_put(handle, t); 98 + scmi_xfer_put(handle, t); 99 99 return ret; 100 100 } 101 101 ··· 108 108 struct scmi_xfer *t; 109 109 struct scmi_msg_resp_sensor_description *buf; 110 110 111 - ret = scmi_one_xfer_init(handle, SENSOR_DESCRIPTION_GET, 111 + ret = scmi_xfer_get_init(handle, SENSOR_DESCRIPTION_GET, 112 112 SCMI_PROTOCOL_SENSOR, sizeof(__le32), 0, &t); 113 113 if (ret) 114 114 return ret; ··· 150 150 */ 151 151 } while (num_returned && num_remaining); 152 152 153 - scmi_one_xfer_put(handle, t); 153 + scmi_xfer_put(handle, t); 154 154 return ret; 155 155 } 156 156 ··· 162 162 struct scmi_xfer *t; 163 163 struct scmi_msg_set_sensor_config *cfg; 164 164 165 - ret = scmi_one_xfer_init(handle, SENSOR_CONFIG_SET, 165 + ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET, 166 166 SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t); 167 167 if (ret) 168 168 return ret; ··· 173 173 174 174 ret = scmi_do_xfer(handle, t); 175 175 176 - scmi_one_xfer_put(handle, t); 176 + scmi_xfer_put(handle, t); 177 177 return ret; 178 178 } 179 179 ··· 185 185 struct scmi_xfer *t; 186 186 struct scmi_msg_set_sensor_trip_point *trip; 187 187 188 - ret = scmi_one_xfer_init(handle, SENSOR_TRIP_POINT_SET, 188 + ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_SET, 189 189 SCMI_PROTOCOL_SENSOR, sizeof(*trip), 0, &t); 190 190 if (ret) 191 191 return ret; ··· 198 198 199 199 ret = scmi_do_xfer(handle, t); 200 200 201 - scmi_one_xfer_put(handle, t); 201 + scmi_xfer_put(handle, t); 202 202 return ret; 203 203 } 204 204 ··· 209 209 struct scmi_xfer *t; 210 210 struct scmi_msg_sensor_reading_get *sensor; 211 211 212 - ret = 
scmi_one_xfer_init(handle, SENSOR_READING_GET, 212 + ret = scmi_xfer_get_init(handle, SENSOR_READING_GET, 213 213 SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 214 214 sizeof(u64), &t); 215 215 if (ret) ··· 227 227 *value |= (u64)le32_to_cpu(*(pval + 1)) << 32; 228 228 } 229 229 230 - scmi_one_xfer_put(handle, t); 230 + scmi_xfer_put(handle, t); 231 231 return ret; 232 232 } 233 233
+1 -9
drivers/firmware/ti_sci.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * Texas Instruments System Control Interface Protocol Driver 3 4 * 4 5 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ 5 6 * Nishanth Menon 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License version 2 as 9 - * published by the Free Software Foundation. 10 - * 11 - * This program is distributed "as is" WITHOUT ANY WARRANTY of any 12 - * kind, whether express or implied; without even the implied warranty 13 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 7 */ 16 8 17 9 #define pr_fmt(fmt) "%s: " fmt, __func__
+1 -29
drivers/firmware/ti_sci.h
··· 1 + // SPDX-License-Identifier: BSD-3-Clause 1 2 /* 2 3 * Texas Instruments System Control Interface (TISCI) Protocol 3 4 * ··· 7 6 * See: http://processors.wiki.ti.com/index.php/TISCI for details 8 7 * 9 8 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ 10 - * 11 - * Redistribution and use in source and binary forms, with or without 12 - * modification, are permitted provided that the following conditions 13 - * are met: 14 - * 15 - * Redistributions of source code must retain the above copyright 16 - * notice, this list of conditions and the following disclaimer. 17 - * 18 - * Redistributions in binary form must reproduce the above copyright 19 - * notice, this list of conditions and the following disclaimer in the 20 - * documentation and/or other materials provided with the 21 - * distribution. 22 - * 23 - * Neither the name of Texas Instruments Incorporated nor the names of 24 - * its contributors may be used to endorse or promote products derived 25 - * from this software without specific prior written permission. 26 - * 27 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 31 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 32 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 33 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 34 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 35 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 36 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 37 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 - * 39 9 */ 40 10 41 11 #ifndef __TI_SCI_H
-10
drivers/memory/Kconfig
··· 104 104 Armada 370 and Armada XP. This controller allows to handle flash 105 105 devices such as NOR, NAND, SRAM, and FPGA. 106 106 107 - config TEGRA20_MC 108 - bool "Tegra20 Memory Controller(MC) driver" 109 - default y 110 - depends on ARCH_TEGRA_2x_SOC 111 - help 112 - This driver is for the Memory Controller(MC) module available 113 - in Tegra20 SoCs, mainly for a address translation fault 114 - analysis, especially for IOMMU/GART(Graphics Address 115 - Relocation Table) module. 116 - 117 107 config FSL_CORENET_CF 118 108 tristate "Freescale CoreNet Error Reporting" 119 109 depends on FSL_SOC_BOOKE
-1
drivers/memory/Makefile
··· 16 16 obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o 17 17 obj-$(CONFIG_FSL_IFC) += fsl_ifc.o 18 18 obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o 19 - obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o 20 19 obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o 21 20 obj-$(CONFIG_MTK_SMI) += mtk-smi.o 22 21 obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
+12 -34
drivers/memory/brcmstb_dpfe.c
··· 176 176 void __iomem *dmem; 177 177 void __iomem *imem; 178 178 struct device *dev; 179 - unsigned int index; 180 179 struct mutex lock; 181 180 }; 182 181 ··· 673 674 { 674 675 struct device *dev = &pdev->dev; 675 676 struct private_data *priv; 676 - struct device *dpfe_dev; 677 677 struct init_data init; 678 678 struct resource *res; 679 - u32 index; 680 679 int ret; 681 680 682 681 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ··· 683 686 684 687 mutex_init(&priv->lock); 685 688 platform_set_drvdata(pdev, priv); 686 - 687 - /* Cell index is optional; default to 0 if not present. */ 688 - ret = of_property_read_u32(dev->of_node, "cell-index", &index); 689 - if (ret) 690 - index = 0; 691 689 692 690 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu"); 693 691 priv->regs = devm_ioremap_resource(dev, res); ··· 707 715 708 716 ret = brcmstb_dpfe_download_firmware(pdev, &init); 709 717 if (ret) 710 - goto err; 718 + return ret; 711 719 712 - dpfe_dev = devm_kzalloc(dev, sizeof(*dpfe_dev), GFP_KERNEL); 713 - if (!dpfe_dev) { 714 - ret = -ENOMEM; 715 - goto err; 716 - } 717 - 718 - priv->dev = dpfe_dev; 719 - priv->index = index; 720 - 721 - dpfe_dev->parent = dev; 722 - dpfe_dev->groups = dpfe_groups; 723 - dpfe_dev->of_node = dev->of_node; 724 - dev_set_drvdata(dpfe_dev, priv); 725 - dev_set_name(dpfe_dev, "dpfe%u", index); 726 - 727 - ret = device_register(dpfe_dev); 728 - if (ret) 729 - goto err; 730 - 731 - dev_info(dev, "registered.\n"); 732 - 733 - return 0; 734 - 735 - err: 736 - dev_err(dev, "failed to initialize -- error %d\n", ret); 720 + ret = sysfs_create_groups(&pdev->dev.kobj, dpfe_groups); 721 + if (!ret) 722 + dev_info(dev, "registered.\n"); 737 723 738 724 return ret; 725 + } 726 + 727 + static int brcmstb_dpfe_remove(struct platform_device *pdev) 728 + { 729 + sysfs_remove_groups(&pdev->dev.kobj, dpfe_groups); 730 + 731 + return 0; 739 732 } 740 733 741 734 static const struct of_device_id brcmstb_dpfe_of_match[] = { ··· 
735 758 .of_match_table = brcmstb_dpfe_of_match, 736 759 }, 737 760 .probe = brcmstb_dpfe_probe, 761 + .remove = brcmstb_dpfe_remove, 738 762 .resume = brcmstb_dpfe_resume, 739 763 }; 740 764
+2 -2
drivers/memory/omap-gpmc.c
··· 2060 2060 * timings. 2061 2061 */ 2062 2062 name = gpmc_cs_get_name(cs); 2063 - if (name && child->name && of_node_cmp(child->name, name) == 0) 2064 - goto no_timings; 2063 + if (name && of_node_cmp(child->name, name) == 0) 2064 + goto no_timings; 2065 2065 2066 2066 ret = gpmc_cs_request(cs, resource_size(&res), &base); 2067 2067 if (ret < 0) {
+1
drivers/memory/tegra/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 tegra-mc-y := mc.o 3 3 4 + tegra-mc-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20.o 4 5 tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o 5 6 tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o 6 7 tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
+331 -45
drivers/memory/tegra/mc.c
··· 7 7 */ 8 8 9 9 #include <linux/clk.h> 10 + #include <linux/delay.h> 10 11 #include <linux/interrupt.h> 11 12 #include <linux/kernel.h> 12 13 #include <linux/module.h> ··· 21 20 #include "mc.h" 22 21 23 22 #define MC_INTSTATUS 0x000 24 - #define MC_INT_DECERR_MTS (1 << 16) 25 - #define MC_INT_SECERR_SEC (1 << 13) 26 - #define MC_INT_DECERR_VPR (1 << 12) 27 - #define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) 28 - #define MC_INT_INVALID_SMMU_PAGE (1 << 10) 29 - #define MC_INT_ARBITRATION_EMEM (1 << 9) 30 - #define MC_INT_SECURITY_VIOLATION (1 << 8) 31 - #define MC_INT_DECERR_EMEM (1 << 6) 32 23 33 24 #define MC_INTMASK 0x004 34 25 ··· 38 45 39 46 #define MC_ERR_ADR 0x0c 40 47 48 + #define MC_DECERR_EMEM_OTHERS_STATUS 0x58 49 + #define MC_SECURITY_VIOLATION_STATUS 0x74 50 + 41 51 #define MC_EMEM_ARB_CFG 0x90 42 52 #define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) (((x) & 0x1ff) << 0) 43 53 #define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff ··· 50 54 #define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0) 51 55 52 56 static const struct of_device_id tegra_mc_of_match[] = { 57 + #ifdef CONFIG_ARCH_TEGRA_2x_SOC 58 + { .compatible = "nvidia,tegra20-mc", .data = &tegra20_mc_soc }, 59 + #endif 53 60 #ifdef CONFIG_ARCH_TEGRA_3x_SOC 54 61 { .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc }, 55 62 #endif ··· 71 72 { } 72 73 }; 73 74 MODULE_DEVICE_TABLE(of, tegra_mc_of_match); 75 + 76 + static int terga_mc_block_dma_common(struct tegra_mc *mc, 77 + const struct tegra_mc_reset *rst) 78 + { 79 + unsigned long flags; 80 + u32 value; 81 + 82 + spin_lock_irqsave(&mc->lock, flags); 83 + 84 + value = mc_readl(mc, rst->control) | BIT(rst->bit); 85 + mc_writel(mc, value, rst->control); 86 + 87 + spin_unlock_irqrestore(&mc->lock, flags); 88 + 89 + return 0; 90 + } 91 + 92 + static bool terga_mc_dma_idling_common(struct tegra_mc *mc, 93 + const struct tegra_mc_reset *rst) 94 + { 95 + return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0; 96 + } 97 + 98 + static int 
terga_mc_unblock_dma_common(struct tegra_mc *mc, 99 + const struct tegra_mc_reset *rst) 100 + { 101 + unsigned long flags; 102 + u32 value; 103 + 104 + spin_lock_irqsave(&mc->lock, flags); 105 + 106 + value = mc_readl(mc, rst->control) & ~BIT(rst->bit); 107 + mc_writel(mc, value, rst->control); 108 + 109 + spin_unlock_irqrestore(&mc->lock, flags); 110 + 111 + return 0; 112 + } 113 + 114 + static int terga_mc_reset_status_common(struct tegra_mc *mc, 115 + const struct tegra_mc_reset *rst) 116 + { 117 + return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0; 118 + } 119 + 120 + const struct tegra_mc_reset_ops terga_mc_reset_ops_common = { 121 + .block_dma = terga_mc_block_dma_common, 122 + .dma_idling = terga_mc_dma_idling_common, 123 + .unblock_dma = terga_mc_unblock_dma_common, 124 + .reset_status = terga_mc_reset_status_common, 125 + }; 126 + 127 + static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev) 128 + { 129 + return container_of(rcdev, struct tegra_mc, reset); 130 + } 131 + 132 + static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc, 133 + unsigned long id) 134 + { 135 + unsigned int i; 136 + 137 + for (i = 0; i < mc->soc->num_resets; i++) 138 + if (mc->soc->resets[i].id == id) 139 + return &mc->soc->resets[i]; 140 + 141 + return NULL; 142 + } 143 + 144 + static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev, 145 + unsigned long id) 146 + { 147 + struct tegra_mc *mc = reset_to_mc(rcdev); 148 + const struct tegra_mc_reset_ops *rst_ops; 149 + const struct tegra_mc_reset *rst; 150 + int retries = 500; 151 + int err; 152 + 153 + rst = tegra_mc_reset_find(mc, id); 154 + if (!rst) 155 + return -ENODEV; 156 + 157 + rst_ops = mc->soc->reset_ops; 158 + if (!rst_ops) 159 + return -ENODEV; 160 + 161 + if (rst_ops->block_dma) { 162 + /* block clients DMA requests */ 163 + err = rst_ops->block_dma(mc, rst); 164 + if (err) { 165 + dev_err(mc->dev, "Failed to block %s DMA: %d\n", 166 + rst->name, err); 167 + 
return err; 168 + } 169 + } 170 + 171 + if (rst_ops->dma_idling) { 172 + /* wait for completion of the outstanding DMA requests */ 173 + while (!rst_ops->dma_idling(mc, rst)) { 174 + if (!retries--) { 175 + dev_err(mc->dev, "Failed to flush %s DMA\n", 176 + rst->name); 177 + return -EBUSY; 178 + } 179 + 180 + usleep_range(10, 100); 181 + } 182 + } 183 + 184 + if (rst_ops->hotreset_assert) { 185 + /* clear clients DMA requests sitting before arbitration */ 186 + err = rst_ops->hotreset_assert(mc, rst); 187 + if (err) { 188 + dev_err(mc->dev, "Failed to hot reset %s: %d\n", 189 + rst->name, err); 190 + return err; 191 + } 192 + } 193 + 194 + return 0; 195 + } 196 + 197 + static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev, 198 + unsigned long id) 199 + { 200 + struct tegra_mc *mc = reset_to_mc(rcdev); 201 + const struct tegra_mc_reset_ops *rst_ops; 202 + const struct tegra_mc_reset *rst; 203 + int err; 204 + 205 + rst = tegra_mc_reset_find(mc, id); 206 + if (!rst) 207 + return -ENODEV; 208 + 209 + rst_ops = mc->soc->reset_ops; 210 + if (!rst_ops) 211 + return -ENODEV; 212 + 213 + if (rst_ops->hotreset_deassert) { 214 + /* take out client from hot reset */ 215 + err = rst_ops->hotreset_deassert(mc, rst); 216 + if (err) { 217 + dev_err(mc->dev, "Failed to deassert hot reset %s: %d\n", 218 + rst->name, err); 219 + return err; 220 + } 221 + } 222 + 223 + if (rst_ops->unblock_dma) { 224 + /* allow new DMA requests to proceed to arbitration */ 225 + err = rst_ops->unblock_dma(mc, rst); 226 + if (err) { 227 + dev_err(mc->dev, "Failed to unblock %s DMA : %d\n", 228 + rst->name, err); 229 + return err; 230 + } 231 + } 232 + 233 + return 0; 234 + } 235 + 236 + static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev, 237 + unsigned long id) 238 + { 239 + struct tegra_mc *mc = reset_to_mc(rcdev); 240 + const struct tegra_mc_reset_ops *rst_ops; 241 + const struct tegra_mc_reset *rst; 242 + 243 + rst = tegra_mc_reset_find(mc, id); 244 + if 
(!rst) 245 + return -ENODEV; 246 + 247 + rst_ops = mc->soc->reset_ops; 248 + if (!rst_ops) 249 + return -ENODEV; 250 + 251 + return rst_ops->reset_status(mc, rst); 252 + } 253 + 254 + static const struct reset_control_ops tegra_mc_reset_ops = { 255 + .assert = tegra_mc_hotreset_assert, 256 + .deassert = tegra_mc_hotreset_deassert, 257 + .status = tegra_mc_hotreset_status, 258 + }; 259 + 260 + static int tegra_mc_reset_setup(struct tegra_mc *mc) 261 + { 262 + int err; 263 + 264 + mc->reset.ops = &tegra_mc_reset_ops; 265 + mc->reset.owner = THIS_MODULE; 266 + mc->reset.of_node = mc->dev->of_node; 267 + mc->reset.of_reset_n_cells = 1; 268 + mc->reset.nr_resets = mc->soc->num_resets; 269 + 270 + err = reset_controller_register(&mc->reset); 271 + if (err < 0) 272 + return err; 273 + 274 + return 0; 275 + } 74 276 75 277 static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc) 76 278 { ··· 429 229 static const char *const status_names[32] = { 430 230 [ 1] = "External interrupt", 431 231 [ 6] = "EMEM address decode error", 232 + [ 7] = "GART page fault", 432 233 [ 8] = "Security violation", 433 234 [ 9] = "EMEM arbitration error", 434 235 [10] = "Page fault", ··· 449 248 static irqreturn_t tegra_mc_irq(int irq, void *data) 450 249 { 451 250 struct tegra_mc *mc = data; 452 - unsigned long status, mask; 251 + unsigned long status; 453 252 unsigned int bit; 454 253 455 254 /* mask all interrupts to avoid flooding */ 456 - status = mc_readl(mc, MC_INTSTATUS); 457 - mask = mc_readl(mc, MC_INTMASK); 255 + status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask; 256 + if (!status) 257 + return IRQ_NONE; 458 258 459 259 for_each_set_bit(bit, &status, 32) { 460 260 const char *error = status_names[bit] ?: "unknown"; ··· 543 341 return IRQ_HANDLED; 544 342 } 545 343 344 + static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data) 345 + { 346 + struct tegra_mc *mc = data; 347 + unsigned long status; 348 + unsigned int bit; 349 + 350 + /* mask all interrupts to 
avoid flooding */ 351 + status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask; 352 + if (!status) 353 + return IRQ_NONE; 354 + 355 + for_each_set_bit(bit, &status, 32) { 356 + const char *direction = "read", *secure = ""; 357 + const char *error = status_names[bit]; 358 + const char *client, *desc; 359 + phys_addr_t addr; 360 + u32 value, reg; 361 + u8 id, type; 362 + 363 + switch (BIT(bit)) { 364 + case MC_INT_DECERR_EMEM: 365 + reg = MC_DECERR_EMEM_OTHERS_STATUS; 366 + value = mc_readl(mc, reg); 367 + 368 + id = value & mc->soc->client_id_mask; 369 + desc = error_names[2]; 370 + 371 + if (value & BIT(31)) 372 + direction = "write"; 373 + break; 374 + 375 + case MC_INT_INVALID_GART_PAGE: 376 + dev_err_ratelimited(mc->dev, "%s\n", error); 377 + continue; 378 + 379 + case MC_INT_SECURITY_VIOLATION: 380 + reg = MC_SECURITY_VIOLATION_STATUS; 381 + value = mc_readl(mc, reg); 382 + 383 + id = value & mc->soc->client_id_mask; 384 + type = (value & BIT(30)) ? 4 : 3; 385 + desc = error_names[type]; 386 + secure = "secure "; 387 + 388 + if (value & BIT(31)) 389 + direction = "write"; 390 + break; 391 + 392 + default: 393 + continue; 394 + } 395 + 396 + client = mc->soc->clients[id].name; 397 + addr = mc_readl(mc, reg + sizeof(u32)); 398 + 399 + dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s)\n", 400 + client, secure, direction, &addr, error, 401 + desc); 402 + } 403 + 404 + /* clear interrupts */ 405 + mc_writel(mc, status, MC_INTSTATUS); 406 + 407 + return IRQ_HANDLED; 408 + } 409 + 546 410 static int tegra_mc_probe(struct platform_device *pdev) 547 411 { 548 412 const struct of_device_id *match; 549 413 struct resource *res; 550 414 struct tegra_mc *mc; 551 - u32 value; 415 + void *isr; 552 416 int err; 553 417 554 418 match = of_match_node(tegra_mc_of_match, pdev->dev.of_node); ··· 626 358 return -ENOMEM; 627 359 628 360 platform_set_drvdata(pdev, mc); 361 + spin_lock_init(&mc->lock); 629 362 mc->soc = match->data; 630 363 mc->dev = &pdev->dev; 631 364 ··· 638 369 
if (IS_ERR(mc->regs)) 639 370 return PTR_ERR(mc->regs); 640 371 641 - mc->clk = devm_clk_get(&pdev->dev, "mc"); 642 - if (IS_ERR(mc->clk)) { 643 - dev_err(&pdev->dev, "failed to get MC clock: %ld\n", 644 - PTR_ERR(mc->clk)); 645 - return PTR_ERR(mc->clk); 646 - } 372 + #ifdef CONFIG_ARCH_TEGRA_2x_SOC 373 + if (mc->soc == &tegra20_mc_soc) { 374 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 375 + mc->regs2 = devm_ioremap_resource(&pdev->dev, res); 376 + if (IS_ERR(mc->regs2)) 377 + return PTR_ERR(mc->regs2); 647 378 648 - err = tegra_mc_setup_latency_allowance(mc); 649 - if (err < 0) { 650 - dev_err(&pdev->dev, "failed to setup latency allowance: %d\n", 651 - err); 652 - return err; 379 + isr = tegra20_mc_irq; 380 + } else 381 + #endif 382 + { 383 + mc->clk = devm_clk_get(&pdev->dev, "mc"); 384 + if (IS_ERR(mc->clk)) { 385 + dev_err(&pdev->dev, "failed to get MC clock: %ld\n", 386 + PTR_ERR(mc->clk)); 387 + return PTR_ERR(mc->clk); 388 + } 389 + 390 + err = tegra_mc_setup_latency_allowance(mc); 391 + if (err < 0) { 392 + dev_err(&pdev->dev, "failed to setup latency allowance: %d\n", 393 + err); 394 + return err; 395 + } 396 + 397 + isr = tegra_mc_irq; 653 398 } 654 399 655 400 err = tegra_mc_setup_timings(mc); 656 401 if (err < 0) { 657 402 dev_err(&pdev->dev, "failed to setup timings: %d\n", err); 403 + return err; 404 + } 405 + 406 + err = tegra_mc_reset_setup(mc); 407 + if (err < 0) { 408 + dev_err(&pdev->dev, "failed to register reset controller: %d\n", 409 + err); 410 + return err; 411 + } 412 + 413 + mc->irq = platform_get_irq(pdev, 0); 414 + if (mc->irq < 0) { 415 + dev_err(&pdev->dev, "interrupt not specified\n"); 416 + return mc->irq; 417 + } 418 + 419 + WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); 420 + 421 + mc_writel(mc, mc->soc->intmask, MC_INTMASK); 422 + 423 + err = devm_request_irq(&pdev->dev, mc->irq, isr, IRQF_SHARED, 424 + dev_name(&pdev->dev), mc); 425 + if (err < 0) { 426 + dev_err(&pdev->dev, "failed to 
request IRQ#%u: %d\n", mc->irq, 427 + err); 658 428 return err; 659 429 } 660 430 ··· 705 397 return PTR_ERR(mc->smmu); 706 398 } 707 399 } 708 - 709 - mc->irq = platform_get_irq(pdev, 0); 710 - if (mc->irq < 0) { 711 - dev_err(&pdev->dev, "interrupt not specified\n"); 712 - return mc->irq; 713 - } 714 - 715 - err = devm_request_irq(&pdev->dev, mc->irq, tegra_mc_irq, IRQF_SHARED, 716 - dev_name(&pdev->dev), mc); 717 - if (err < 0) { 718 - dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq, 719 - err); 720 - return err; 721 - } 722 - 723 - WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); 724 - 725 - value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 726 - MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | 727 - MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM; 728 - 729 - mc_writel(mc, value, MC_INTMASK); 730 400 731 401 return 0; 732 402 }
+22
drivers/memory/tegra/mc.h
··· 14 14 15 15 #include <soc/tegra/mc.h> 16 16 17 + #define MC_INT_DECERR_MTS (1 << 16) 18 + #define MC_INT_SECERR_SEC (1 << 13) 19 + #define MC_INT_DECERR_VPR (1 << 12) 20 + #define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) 21 + #define MC_INT_INVALID_SMMU_PAGE (1 << 10) 22 + #define MC_INT_ARBITRATION_EMEM (1 << 9) 23 + #define MC_INT_SECURITY_VIOLATION (1 << 8) 24 + #define MC_INT_INVALID_GART_PAGE (1 << 7) 25 + #define MC_INT_DECERR_EMEM (1 << 6) 26 + 17 27 static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset) 18 28 { 29 + if (mc->regs2 && offset >= 0x24) 30 + return readl(mc->regs2 + offset - 0x3c); 31 + 19 32 return readl(mc->regs + offset); 20 33 } 21 34 22 35 static inline void mc_writel(struct tegra_mc *mc, u32 value, 23 36 unsigned long offset) 24 37 { 38 + if (mc->regs2 && offset >= 0x24) 39 + return writel(value, mc->regs2 + offset - 0x3c); 40 + 25 41 writel(value, mc->regs + offset); 26 42 } 43 + 44 + extern const struct tegra_mc_reset_ops terga_mc_reset_ops_common; 45 + 46 + #ifdef CONFIG_ARCH_TEGRA_2x_SOC 47 + extern const struct tegra_mc_soc tegra20_mc_soc; 48 + #endif 27 49 28 50 #ifdef CONFIG_ARCH_TEGRA_3x_SOC 29 51 extern const struct tegra_mc_soc tegra30_mc_soc;
+33
drivers/memory/tegra/tegra114.c
··· 938 938 .num_asids = 4, 939 939 }; 940 940 941 + #define TEGRA114_MC_RESET(_name, _control, _status, _bit) \ 942 + { \ 943 + .name = #_name, \ 944 + .id = TEGRA114_MC_RESET_##_name, \ 945 + .control = _control, \ 946 + .status = _status, \ 947 + .bit = _bit, \ 948 + } 949 + 950 + static const struct tegra_mc_reset tegra114_mc_resets[] = { 951 + TEGRA114_MC_RESET(AVPC, 0x200, 0x204, 1), 952 + TEGRA114_MC_RESET(DC, 0x200, 0x204, 2), 953 + TEGRA114_MC_RESET(DCB, 0x200, 0x204, 3), 954 + TEGRA114_MC_RESET(EPP, 0x200, 0x204, 4), 955 + TEGRA114_MC_RESET(2D, 0x200, 0x204, 5), 956 + TEGRA114_MC_RESET(HC, 0x200, 0x204, 6), 957 + TEGRA114_MC_RESET(HDA, 0x200, 0x204, 7), 958 + TEGRA114_MC_RESET(ISP, 0x200, 0x204, 8), 959 + TEGRA114_MC_RESET(MPCORE, 0x200, 0x204, 9), 960 + TEGRA114_MC_RESET(MPCORELP, 0x200, 0x204, 10), 961 + TEGRA114_MC_RESET(MPE, 0x200, 0x204, 11), 962 + TEGRA114_MC_RESET(3D, 0x200, 0x204, 12), 963 + TEGRA114_MC_RESET(3D2, 0x200, 0x204, 13), 964 + TEGRA114_MC_RESET(PPCS, 0x200, 0x204, 14), 965 + TEGRA114_MC_RESET(VDE, 0x200, 0x204, 16), 966 + TEGRA114_MC_RESET(VI, 0x200, 0x204, 17), 967 + }; 968 + 941 969 const struct tegra_mc_soc tegra114_mc_soc = { 942 970 .clients = tegra114_mc_clients, 943 971 .num_clients = ARRAY_SIZE(tegra114_mc_clients), ··· 973 945 .atom_size = 32, 974 946 .client_id_mask = 0x7f, 975 947 .smmu = &tegra114_smmu_soc, 948 + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | 949 + MC_INT_DECERR_EMEM, 950 + .reset_ops = &terga_mc_reset_ops_common, 951 + .resets = tegra114_mc_resets, 952 + .num_resets = ARRAY_SIZE(tegra114_mc_resets), 976 953 };
+48
drivers/memory/tegra/tegra124.c
··· 1012 1012 }, 1013 1013 }; 1014 1014 1015 + #define TEGRA124_MC_RESET(_name, _control, _status, _bit) \ 1016 + { \ 1017 + .name = #_name, \ 1018 + .id = TEGRA124_MC_RESET_##_name, \ 1019 + .control = _control, \ 1020 + .status = _status, \ 1021 + .bit = _bit, \ 1022 + } 1023 + 1024 + static const struct tegra_mc_reset tegra124_mc_resets[] = { 1025 + TEGRA124_MC_RESET(AFI, 0x200, 0x204, 0), 1026 + TEGRA124_MC_RESET(AVPC, 0x200, 0x204, 1), 1027 + TEGRA124_MC_RESET(DC, 0x200, 0x204, 2), 1028 + TEGRA124_MC_RESET(DCB, 0x200, 0x204, 3), 1029 + TEGRA124_MC_RESET(HC, 0x200, 0x204, 6), 1030 + TEGRA124_MC_RESET(HDA, 0x200, 0x204, 7), 1031 + TEGRA124_MC_RESET(ISP2, 0x200, 0x204, 8), 1032 + TEGRA124_MC_RESET(MPCORE, 0x200, 0x204, 9), 1033 + TEGRA124_MC_RESET(MPCORELP, 0x200, 0x204, 10), 1034 + TEGRA124_MC_RESET(MSENC, 0x200, 0x204, 11), 1035 + TEGRA124_MC_RESET(PPCS, 0x200, 0x204, 14), 1036 + TEGRA124_MC_RESET(SATA, 0x200, 0x204, 15), 1037 + TEGRA124_MC_RESET(VDE, 0x200, 0x204, 16), 1038 + TEGRA124_MC_RESET(VI, 0x200, 0x204, 17), 1039 + TEGRA124_MC_RESET(VIC, 0x200, 0x204, 18), 1040 + TEGRA124_MC_RESET(XUSB_HOST, 0x200, 0x204, 19), 1041 + TEGRA124_MC_RESET(XUSB_DEV, 0x200, 0x204, 20), 1042 + TEGRA124_MC_RESET(TSEC, 0x200, 0x204, 21), 1043 + TEGRA124_MC_RESET(SDMMC1, 0x200, 0x204, 22), 1044 + TEGRA124_MC_RESET(SDMMC2, 0x200, 0x204, 23), 1045 + TEGRA124_MC_RESET(SDMMC3, 0x200, 0x204, 25), 1046 + TEGRA124_MC_RESET(SDMMC4, 0x970, 0x974, 0), 1047 + TEGRA124_MC_RESET(ISP2B, 0x970, 0x974, 1), 1048 + TEGRA124_MC_RESET(GPU, 0x970, 0x974, 2), 1049 + }; 1050 + 1015 1051 #ifdef CONFIG_ARCH_TEGRA_124_SOC 1016 1052 static const struct tegra_smmu_soc tegra124_smmu_soc = { 1017 1053 .clients = tegra124_mc_clients, ··· 1071 1035 .smmu = &tegra124_smmu_soc, 1072 1036 .emem_regs = tegra124_mc_emem_regs, 1073 1037 .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs), 1038 + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 1039 + MC_INT_INVALID_APB_ASID_UPDATE | 
MC_INT_INVALID_SMMU_PAGE | 1040 + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, 1041 + .reset_ops = &terga_mc_reset_ops_common, 1042 + .resets = tegra124_mc_resets, 1043 + .num_resets = ARRAY_SIZE(tegra124_mc_resets), 1074 1044 }; 1075 1045 #endif /* CONFIG_ARCH_TEGRA_124_SOC */ 1076 1046 ··· 1101 1059 .atom_size = 32, 1102 1060 .client_id_mask = 0x7f, 1103 1061 .smmu = &tegra132_smmu_soc, 1062 + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 1063 + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | 1064 + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, 1065 + .reset_ops = &terga_mc_reset_ops_common, 1066 + .resets = tegra124_mc_resets, 1067 + .num_resets = ARRAY_SIZE(tegra124_mc_resets), 1104 1068 }; 1105 1069 #endif /* CONFIG_ARCH_TEGRA_132_SOC */
+296
drivers/memory/tegra/tegra20.c
··· 1 + /* 2 + * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #include <dt-bindings/memory/tegra20-mc.h> 10 + 11 + #include "mc.h" 12 + 13 + static const struct tegra_mc_client tegra20_mc_clients[] = { 14 + { 15 + .id = 0x00, 16 + .name = "display0a", 17 + }, { 18 + .id = 0x01, 19 + .name = "display0ab", 20 + }, { 21 + .id = 0x02, 22 + .name = "display0b", 23 + }, { 24 + .id = 0x03, 25 + .name = "display0bb", 26 + }, { 27 + .id = 0x04, 28 + .name = "display0c", 29 + }, { 30 + .id = 0x05, 31 + .name = "display0cb", 32 + }, { 33 + .id = 0x06, 34 + .name = "display1b", 35 + }, { 36 + .id = 0x07, 37 + .name = "display1bb", 38 + }, { 39 + .id = 0x08, 40 + .name = "eppup", 41 + }, { 42 + .id = 0x09, 43 + .name = "g2pr", 44 + }, { 45 + .id = 0x0a, 46 + .name = "g2sr", 47 + }, { 48 + .id = 0x0b, 49 + .name = "mpeunifbr", 50 + }, { 51 + .id = 0x0c, 52 + .name = "viruv", 53 + }, { 54 + .id = 0x0d, 55 + .name = "avpcarm7r", 56 + }, { 57 + .id = 0x0e, 58 + .name = "displayhc", 59 + }, { 60 + .id = 0x0f, 61 + .name = "displayhcb", 62 + }, { 63 + .id = 0x10, 64 + .name = "fdcdrd", 65 + }, { 66 + .id = 0x11, 67 + .name = "g2dr", 68 + }, { 69 + .id = 0x12, 70 + .name = "host1xdmar", 71 + }, { 72 + .id = 0x13, 73 + .name = "host1xr", 74 + }, { 75 + .id = 0x14, 76 + .name = "idxsrd", 77 + }, { 78 + .id = 0x15, 79 + .name = "mpcorer", 80 + }, { 81 + .id = 0x16, 82 + .name = "mpe_ipred", 83 + }, { 84 + .id = 0x17, 85 + .name = "mpeamemrd", 86 + }, { 87 + .id = 0x18, 88 + .name = "mpecsrd", 89 + }, { 90 + .id = 0x19, 91 + .name = "ppcsahbdmar", 92 + }, { 93 + .id = 0x1a, 94 + .name = "ppcsahbslvr", 95 + }, { 96 + .id = 0x1b, 97 + .name = "texsrd", 98 + }, { 99 + .id = 0x1c, 100 + .name = "vdebsevr", 101 + }, { 102 + .id = 0x1d, 103 + .name = "vdember", 104 + 
}, { 105 + .id = 0x1e, 106 + .name = "vdemcer", 107 + }, { 108 + .id = 0x1f, 109 + .name = "vdetper", 110 + }, { 111 + .id = 0x20, 112 + .name = "eppu", 113 + }, { 114 + .id = 0x21, 115 + .name = "eppv", 116 + }, { 117 + .id = 0x22, 118 + .name = "eppy", 119 + }, { 120 + .id = 0x23, 121 + .name = "mpeunifbw", 122 + }, { 123 + .id = 0x24, 124 + .name = "viwsb", 125 + }, { 126 + .id = 0x25, 127 + .name = "viwu", 128 + }, { 129 + .id = 0x26, 130 + .name = "viwv", 131 + }, { 132 + .id = 0x27, 133 + .name = "viwy", 134 + }, { 135 + .id = 0x28, 136 + .name = "g2dw", 137 + }, { 138 + .id = 0x29, 139 + .name = "avpcarm7w", 140 + }, { 141 + .id = 0x2a, 142 + .name = "fdcdwr", 143 + }, { 144 + .id = 0x2b, 145 + .name = "host1xw", 146 + }, { 147 + .id = 0x2c, 148 + .name = "ispw", 149 + }, { 150 + .id = 0x2d, 151 + .name = "mpcorew", 152 + }, { 153 + .id = 0x2e, 154 + .name = "mpecswr", 155 + }, { 156 + .id = 0x2f, 157 + .name = "ppcsahbdmaw", 158 + }, { 159 + .id = 0x30, 160 + .name = "ppcsahbslvw", 161 + }, { 162 + .id = 0x31, 163 + .name = "vdebsevw", 164 + }, { 165 + .id = 0x32, 166 + .name = "vdembew", 167 + }, { 168 + .id = 0x33, 169 + .name = "vdetpmw", 170 + }, 171 + }; 172 + 173 + #define TEGRA20_MC_RESET(_name, _control, _status, _reset, _bit) \ 174 + { \ 175 + .name = #_name, \ 176 + .id = TEGRA20_MC_RESET_##_name, \ 177 + .control = _control, \ 178 + .status = _status, \ 179 + .reset = _reset, \ 180 + .bit = _bit, \ 181 + } 182 + 183 + static const struct tegra_mc_reset tegra20_mc_resets[] = { 184 + TEGRA20_MC_RESET(AVPC, 0x100, 0x140, 0x104, 0), 185 + TEGRA20_MC_RESET(DC, 0x100, 0x144, 0x104, 1), 186 + TEGRA20_MC_RESET(DCB, 0x100, 0x148, 0x104, 2), 187 + TEGRA20_MC_RESET(EPP, 0x100, 0x14c, 0x104, 3), 188 + TEGRA20_MC_RESET(2D, 0x100, 0x150, 0x104, 4), 189 + TEGRA20_MC_RESET(HC, 0x100, 0x154, 0x104, 5), 190 + TEGRA20_MC_RESET(ISP, 0x100, 0x158, 0x104, 6), 191 + TEGRA20_MC_RESET(MPCORE, 0x100, 0x15c, 0x104, 7), 192 + TEGRA20_MC_RESET(MPEA, 0x100, 0x160, 0x104, 8), 
193 + TEGRA20_MC_RESET(MPEB, 0x100, 0x164, 0x104, 9), 194 + TEGRA20_MC_RESET(MPEC, 0x100, 0x168, 0x104, 10), 195 + TEGRA20_MC_RESET(3D, 0x100, 0x16c, 0x104, 11), 196 + TEGRA20_MC_RESET(PPCS, 0x100, 0x170, 0x104, 12), 197 + TEGRA20_MC_RESET(VDE, 0x100, 0x174, 0x104, 13), 198 + TEGRA20_MC_RESET(VI, 0x100, 0x178, 0x104, 14), 199 + }; 200 + 201 + static int terga20_mc_hotreset_assert(struct tegra_mc *mc, 202 + const struct tegra_mc_reset *rst) 203 + { 204 + unsigned long flags; 205 + u32 value; 206 + 207 + spin_lock_irqsave(&mc->lock, flags); 208 + 209 + value = mc_readl(mc, rst->reset); 210 + mc_writel(mc, value & ~BIT(rst->bit), rst->reset); 211 + 212 + spin_unlock_irqrestore(&mc->lock, flags); 213 + 214 + return 0; 215 + } 216 + 217 + static int terga20_mc_hotreset_deassert(struct tegra_mc *mc, 218 + const struct tegra_mc_reset *rst) 219 + { 220 + unsigned long flags; 221 + u32 value; 222 + 223 + spin_lock_irqsave(&mc->lock, flags); 224 + 225 + value = mc_readl(mc, rst->reset); 226 + mc_writel(mc, value | BIT(rst->bit), rst->reset); 227 + 228 + spin_unlock_irqrestore(&mc->lock, flags); 229 + 230 + return 0; 231 + } 232 + 233 + static int terga20_mc_block_dma(struct tegra_mc *mc, 234 + const struct tegra_mc_reset *rst) 235 + { 236 + unsigned long flags; 237 + u32 value; 238 + 239 + spin_lock_irqsave(&mc->lock, flags); 240 + 241 + value = mc_readl(mc, rst->control) & ~BIT(rst->bit); 242 + mc_writel(mc, value, rst->control); 243 + 244 + spin_unlock_irqrestore(&mc->lock, flags); 245 + 246 + return 0; 247 + } 248 + 249 + static bool terga20_mc_dma_idling(struct tegra_mc *mc, 250 + const struct tegra_mc_reset *rst) 251 + { 252 + return mc_readl(mc, rst->status) == 0; 253 + } 254 + 255 + static int terga20_mc_reset_status(struct tegra_mc *mc, 256 + const struct tegra_mc_reset *rst) 257 + { 258 + return (mc_readl(mc, rst->reset) & BIT(rst->bit)) == 0; 259 + } 260 + 261 + static int terga20_mc_unblock_dma(struct tegra_mc *mc, 262 + const struct tegra_mc_reset *rst) 263 + { 
264 + unsigned long flags; 265 + u32 value; 266 + 267 + spin_lock_irqsave(&mc->lock, flags); 268 + 269 + value = mc_readl(mc, rst->control) | BIT(rst->bit); 270 + mc_writel(mc, value, rst->control); 271 + 272 + spin_unlock_irqrestore(&mc->lock, flags); 273 + 274 + return 0; 275 + } 276 + 277 + const struct tegra_mc_reset_ops terga20_mc_reset_ops = { 278 + .hotreset_assert = terga20_mc_hotreset_assert, 279 + .hotreset_deassert = terga20_mc_hotreset_deassert, 280 + .block_dma = terga20_mc_block_dma, 281 + .dma_idling = terga20_mc_dma_idling, 282 + .unblock_dma = terga20_mc_unblock_dma, 283 + .reset_status = terga20_mc_reset_status, 284 + }; 285 + 286 + const struct tegra_mc_soc tegra20_mc_soc = { 287 + .clients = tegra20_mc_clients, 288 + .num_clients = ARRAY_SIZE(tegra20_mc_clients), 289 + .num_address_bits = 32, 290 + .client_id_mask = 0x3f, 291 + .intmask = MC_INT_SECURITY_VIOLATION | MC_INT_INVALID_GART_PAGE | 292 + MC_INT_DECERR_EMEM, 293 + .reset_ops = &terga20_mc_reset_ops, 294 + .resets = tegra20_mc_resets, 295 + .num_resets = ARRAY_SIZE(tegra20_mc_resets), 296 + };
+48 -5
drivers/memory/tegra/tegra210.c
··· 6 6 * published by the Free Software Foundation. 7 7 */ 8 8 9 - #include <linux/of.h> 10 - #include <linux/mm.h> 11 - 12 - #include <asm/cacheflush.h> 13 - 14 9 #include <dt-bindings/memory/tegra210-mc.h> 15 10 16 11 #include "mc.h" ··· 1080 1085 .num_asids = 128, 1081 1086 }; 1082 1087 1088 + #define TEGRA210_MC_RESET(_name, _control, _status, _bit) \ 1089 + { \ 1090 + .name = #_name, \ 1091 + .id = TEGRA210_MC_RESET_##_name, \ 1092 + .control = _control, \ 1093 + .status = _status, \ 1094 + .bit = _bit, \ 1095 + } 1096 + 1097 + static const struct tegra_mc_reset tegra210_mc_resets[] = { 1098 + TEGRA210_MC_RESET(AFI, 0x200, 0x204, 0), 1099 + TEGRA210_MC_RESET(AVPC, 0x200, 0x204, 1), 1100 + TEGRA210_MC_RESET(DC, 0x200, 0x204, 2), 1101 + TEGRA210_MC_RESET(DCB, 0x200, 0x204, 3), 1102 + TEGRA210_MC_RESET(HC, 0x200, 0x204, 6), 1103 + TEGRA210_MC_RESET(HDA, 0x200, 0x204, 7), 1104 + TEGRA210_MC_RESET(ISP2, 0x200, 0x204, 8), 1105 + TEGRA210_MC_RESET(MPCORE, 0x200, 0x204, 9), 1106 + TEGRA210_MC_RESET(NVENC, 0x200, 0x204, 11), 1107 + TEGRA210_MC_RESET(PPCS, 0x200, 0x204, 14), 1108 + TEGRA210_MC_RESET(SATA, 0x200, 0x204, 15), 1109 + TEGRA210_MC_RESET(VI, 0x200, 0x204, 17), 1110 + TEGRA210_MC_RESET(VIC, 0x200, 0x204, 18), 1111 + TEGRA210_MC_RESET(XUSB_HOST, 0x200, 0x204, 19), 1112 + TEGRA210_MC_RESET(XUSB_DEV, 0x200, 0x204, 20), 1113 + TEGRA210_MC_RESET(A9AVP, 0x200, 0x204, 21), 1114 + TEGRA210_MC_RESET(TSEC, 0x200, 0x204, 22), 1115 + TEGRA210_MC_RESET(SDMMC1, 0x200, 0x204, 29), 1116 + TEGRA210_MC_RESET(SDMMC2, 0x200, 0x204, 30), 1117 + TEGRA210_MC_RESET(SDMMC3, 0x200, 0x204, 31), 1118 + TEGRA210_MC_RESET(SDMMC4, 0x970, 0x974, 0), 1119 + TEGRA210_MC_RESET(ISP2B, 0x970, 0x974, 1), 1120 + TEGRA210_MC_RESET(GPU, 0x970, 0x974, 2), 1121 + TEGRA210_MC_RESET(NVDEC, 0x970, 0x974, 5), 1122 + TEGRA210_MC_RESET(APE, 0x970, 0x974, 6), 1123 + TEGRA210_MC_RESET(SE, 0x970, 0x974, 7), 1124 + TEGRA210_MC_RESET(NVJPG, 0x970, 0x974, 8), 1125 + TEGRA210_MC_RESET(AXIAP, 0x970, 0x974, 11), 
1126 + TEGRA210_MC_RESET(ETR, 0x970, 0x974, 12), 1127 + TEGRA210_MC_RESET(TSECB, 0x970, 0x974, 13), 1128 + }; 1129 + 1083 1130 const struct tegra_mc_soc tegra210_mc_soc = { 1084 1131 .clients = tegra210_mc_clients, 1085 1132 .num_clients = ARRAY_SIZE(tegra210_mc_clients), ··· 1129 1092 .atom_size = 64, 1130 1093 .client_id_mask = 0xff, 1131 1094 .smmu = &tegra210_smmu_soc, 1095 + .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | 1096 + MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | 1097 + MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, 1098 + .reset_ops = &terga_mc_reset_ops_common, 1099 + .resets = tegra210_mc_resets, 1100 + .num_resets = ARRAY_SIZE(tegra210_mc_resets), 1132 1101 };
+35
drivers/memory/tegra/tegra30.c
··· 960 960 .num_asids = 4, 961 961 }; 962 962 963 + #define TEGRA30_MC_RESET(_name, _control, _status, _bit) \ 964 + { \ 965 + .name = #_name, \ 966 + .id = TEGRA30_MC_RESET_##_name, \ 967 + .control = _control, \ 968 + .status = _status, \ 969 + .bit = _bit, \ 970 + } 971 + 972 + static const struct tegra_mc_reset tegra30_mc_resets[] = { 973 + TEGRA30_MC_RESET(AFI, 0x200, 0x204, 0), 974 + TEGRA30_MC_RESET(AVPC, 0x200, 0x204, 1), 975 + TEGRA30_MC_RESET(DC, 0x200, 0x204, 2), 976 + TEGRA30_MC_RESET(DCB, 0x200, 0x204, 3), 977 + TEGRA30_MC_RESET(EPP, 0x200, 0x204, 4), 978 + TEGRA30_MC_RESET(2D, 0x200, 0x204, 5), 979 + TEGRA30_MC_RESET(HC, 0x200, 0x204, 6), 980 + TEGRA30_MC_RESET(HDA, 0x200, 0x204, 7), 981 + TEGRA30_MC_RESET(ISP, 0x200, 0x204, 8), 982 + TEGRA30_MC_RESET(MPCORE, 0x200, 0x204, 9), 983 + TEGRA30_MC_RESET(MPCORELP, 0x200, 0x204, 10), 984 + TEGRA30_MC_RESET(MPE, 0x200, 0x204, 11), 985 + TEGRA30_MC_RESET(3D, 0x200, 0x204, 12), 986 + TEGRA30_MC_RESET(3D2, 0x200, 0x204, 13), 987 + TEGRA30_MC_RESET(PPCS, 0x200, 0x204, 14), 988 + TEGRA30_MC_RESET(SATA, 0x200, 0x204, 15), 989 + TEGRA30_MC_RESET(VDE, 0x200, 0x204, 16), 990 + TEGRA30_MC_RESET(VI, 0x200, 0x204, 17), 991 + }; 992 + 963 993 const struct tegra_mc_soc tegra30_mc_soc = { 964 994 .clients = tegra30_mc_clients, 965 995 .num_clients = ARRAY_SIZE(tegra30_mc_clients), ··· 997 967 .atom_size = 16, 998 968 .client_id_mask = 0x7f, 999 969 .smmu = &tegra30_smmu_soc, 970 + .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | 971 + MC_INT_DECERR_EMEM, 972 + .reset_ops = &terga_mc_reset_ops_common, 973 + .resets = tegra30_mc_resets, 974 + .num_resets = ARRAY_SIZE(tegra30_mc_resets), 1000 975 };
-254
drivers/memory/tegra20-mc.c
··· 1 - /* 2 - * Tegra20 Memory Controller 3 - * 4 - * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 5 - * 6 - * This program is free software; you can redistribute it and/or modify it 7 - * under the terms and conditions of the GNU General Public License, 8 - * version 2, as published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope it will be useful, but WITHOUT 11 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 - * more details. 14 - * 15 - * You should have received a copy of the GNU General Public License along with 16 - * this program; if not, write to the Free Software Foundation, Inc., 17 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 - */ 19 - 20 - #include <linux/err.h> 21 - #include <linux/kernel.h> 22 - #include <linux/module.h> 23 - #include <linux/ratelimit.h> 24 - #include <linux/platform_device.h> 25 - #include <linux/interrupt.h> 26 - #include <linux/io.h> 27 - 28 - #define DRV_NAME "tegra20-mc" 29 - 30 - #define MC_INTSTATUS 0x0 31 - #define MC_INTMASK 0x4 32 - 33 - #define MC_INT_ERR_SHIFT 6 34 - #define MC_INT_ERR_MASK (0x1f << MC_INT_ERR_SHIFT) 35 - #define MC_INT_DECERR_EMEM BIT(MC_INT_ERR_SHIFT) 36 - #define MC_INT_INVALID_GART_PAGE BIT(MC_INT_ERR_SHIFT + 1) 37 - #define MC_INT_SECURITY_VIOLATION BIT(MC_INT_ERR_SHIFT + 2) 38 - #define MC_INT_ARBITRATION_EMEM BIT(MC_INT_ERR_SHIFT + 3) 39 - 40 - #define MC_GART_ERROR_REQ 0x30 41 - #define MC_DECERR_EMEM_OTHERS_STATUS 0x58 42 - #define MC_SECURITY_VIOLATION_STATUS 0x74 43 - 44 - #define SECURITY_VIOLATION_TYPE BIT(30) /* 0=TRUSTZONE, 1=CARVEOUT */ 45 - 46 - #define MC_CLIENT_ID_MASK 0x3f 47 - 48 - #define NUM_MC_REG_BANKS 2 49 - 50 - struct tegra20_mc { 51 - void __iomem *regs[NUM_MC_REG_BANKS]; 52 - struct device *dev; 53 - }; 54 - 55 - static inline u32 mc_readl(struct tegra20_mc *mc, u32 offs) 56 - { 57 - u32 val = 0; 
58 - 59 - if (offs < 0x24) 60 - val = readl(mc->regs[0] + offs); 61 - else if (offs < 0x400) 62 - val = readl(mc->regs[1] + offs - 0x3c); 63 - 64 - return val; 65 - } 66 - 67 - static inline void mc_writel(struct tegra20_mc *mc, u32 val, u32 offs) 68 - { 69 - if (offs < 0x24) 70 - writel(val, mc->regs[0] + offs); 71 - else if (offs < 0x400) 72 - writel(val, mc->regs[1] + offs - 0x3c); 73 - } 74 - 75 - static const char * const tegra20_mc_client[] = { 76 - "cbr_display0a", 77 - "cbr_display0ab", 78 - "cbr_display0b", 79 - "cbr_display0bb", 80 - "cbr_display0c", 81 - "cbr_display0cb", 82 - "cbr_display1b", 83 - "cbr_display1bb", 84 - "cbr_eppup", 85 - "cbr_g2pr", 86 - "cbr_g2sr", 87 - "cbr_mpeunifbr", 88 - "cbr_viruv", 89 - "csr_avpcarm7r", 90 - "csr_displayhc", 91 - "csr_displayhcb", 92 - "csr_fdcdrd", 93 - "csr_g2dr", 94 - "csr_host1xdmar", 95 - "csr_host1xr", 96 - "csr_idxsrd", 97 - "csr_mpcorer", 98 - "csr_mpe_ipred", 99 - "csr_mpeamemrd", 100 - "csr_mpecsrd", 101 - "csr_ppcsahbdmar", 102 - "csr_ppcsahbslvr", 103 - "csr_texsrd", 104 - "csr_vdebsevr", 105 - "csr_vdember", 106 - "csr_vdemcer", 107 - "csr_vdetper", 108 - "cbw_eppu", 109 - "cbw_eppv", 110 - "cbw_eppy", 111 - "cbw_mpeunifbw", 112 - "cbw_viwsb", 113 - "cbw_viwu", 114 - "cbw_viwv", 115 - "cbw_viwy", 116 - "ccw_g2dw", 117 - "csw_avpcarm7w", 118 - "csw_fdcdwr", 119 - "csw_host1xw", 120 - "csw_ispw", 121 - "csw_mpcorew", 122 - "csw_mpecswr", 123 - "csw_ppcsahbdmaw", 124 - "csw_ppcsahbslvw", 125 - "csw_vdebsevw", 126 - "csw_vdembew", 127 - "csw_vdetpmw", 128 - }; 129 - 130 - static void tegra20_mc_decode(struct tegra20_mc *mc, int n) 131 - { 132 - u32 addr, req; 133 - const char *client = "Unknown"; 134 - int idx, cid; 135 - const struct reg_info { 136 - u32 offset; 137 - u32 write_bit; /* 0=READ, 1=WRITE */ 138 - int cid_shift; 139 - char *message; 140 - } reg[] = { 141 - { 142 - .offset = MC_DECERR_EMEM_OTHERS_STATUS, 143 - .write_bit = 31, 144 - .message = "MC_DECERR", 145 - }, 146 - { 147 - .offset = 
MC_GART_ERROR_REQ, 148 - .cid_shift = 1, 149 - .message = "MC_GART_ERR", 150 - 151 - }, 152 - { 153 - .offset = MC_SECURITY_VIOLATION_STATUS, 154 - .write_bit = 31, 155 - .message = "MC_SECURITY_ERR", 156 - }, 157 - }; 158 - 159 - idx = n - MC_INT_ERR_SHIFT; 160 - if ((idx < 0) || (idx >= ARRAY_SIZE(reg))) { 161 - dev_err_ratelimited(mc->dev, "Unknown interrupt status %08lx\n", 162 - BIT(n)); 163 - return; 164 - } 165 - 166 - req = mc_readl(mc, reg[idx].offset); 167 - cid = (req >> reg[idx].cid_shift) & MC_CLIENT_ID_MASK; 168 - if (cid < ARRAY_SIZE(tegra20_mc_client)) 169 - client = tegra20_mc_client[cid]; 170 - 171 - addr = mc_readl(mc, reg[idx].offset + sizeof(u32)); 172 - 173 - dev_err_ratelimited(mc->dev, "%s (0x%08x): 0x%08x %s (%s %s)\n", 174 - reg[idx].message, req, addr, client, 175 - (req & BIT(reg[idx].write_bit)) ? "write" : "read", 176 - (reg[idx].offset == MC_SECURITY_VIOLATION_STATUS) ? 177 - ((req & SECURITY_VIOLATION_TYPE) ? 178 - "carveout" : "trustzone") : ""); 179 - } 180 - 181 - static const struct of_device_id tegra20_mc_of_match[] = { 182 - { .compatible = "nvidia,tegra20-mc", }, 183 - {}, 184 - }; 185 - 186 - static irqreturn_t tegra20_mc_isr(int irq, void *data) 187 - { 188 - u32 stat, mask, bit; 189 - struct tegra20_mc *mc = data; 190 - 191 - stat = mc_readl(mc, MC_INTSTATUS); 192 - mask = mc_readl(mc, MC_INTMASK); 193 - mask &= stat; 194 - if (!mask) 195 - return IRQ_NONE; 196 - while ((bit = ffs(mask)) != 0) { 197 - tegra20_mc_decode(mc, bit - 1); 198 - mask &= ~BIT(bit - 1); 199 - } 200 - 201 - mc_writel(mc, stat, MC_INTSTATUS); 202 - return IRQ_HANDLED; 203 - } 204 - 205 - static int tegra20_mc_probe(struct platform_device *pdev) 206 - { 207 - struct resource *irq; 208 - struct tegra20_mc *mc; 209 - int i, err; 210 - u32 intmask; 211 - 212 - mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); 213 - if (!mc) 214 - return -ENOMEM; 215 - mc->dev = &pdev->dev; 216 - 217 - for (i = 0; i < ARRAY_SIZE(mc->regs); i++) { 218 - struct 
resource *res; 219 - 220 - res = platform_get_resource(pdev, IORESOURCE_MEM, i); 221 - mc->regs[i] = devm_ioremap_resource(&pdev->dev, res); 222 - if (IS_ERR(mc->regs[i])) 223 - return PTR_ERR(mc->regs[i]); 224 - } 225 - 226 - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 227 - if (!irq) 228 - return -ENODEV; 229 - err = devm_request_irq(&pdev->dev, irq->start, tegra20_mc_isr, 230 - IRQF_SHARED, dev_name(&pdev->dev), mc); 231 - if (err) 232 - return -ENODEV; 233 - 234 - platform_set_drvdata(pdev, mc); 235 - 236 - intmask = MC_INT_INVALID_GART_PAGE | 237 - MC_INT_DECERR_EMEM | MC_INT_SECURITY_VIOLATION; 238 - mc_writel(mc, intmask, MC_INTMASK); 239 - return 0; 240 - } 241 - 242 - static struct platform_driver tegra20_mc_driver = { 243 - .probe = tegra20_mc_probe, 244 - .driver = { 245 - .name = DRV_NAME, 246 - .of_match_table = tegra20_mc_of_match, 247 - }, 248 - }; 249 - module_platform_driver(tegra20_mc_driver); 250 - 251 - MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>"); 252 - MODULE_DESCRIPTION("Tegra20 MC driver"); 253 - MODULE_LICENSE("GPL v2"); 254 - MODULE_ALIAS("platform:" DRV_NAME);
+39 -21
drivers/memory/ti-aemif.c
··· 339 339 struct aemif_platform_data *pdata; 340 340 struct of_dev_auxdata *dev_lookup; 341 341 342 - if (np == NULL) 343 - return 0; 344 - 345 342 aemif = devm_kzalloc(dev, sizeof(*aemif), GFP_KERNEL); 346 343 if (!aemif) 347 344 return -ENOMEM; ··· 360 363 361 364 aemif->clk_rate = clk_get_rate(aemif->clk) / MSEC_PER_SEC; 362 365 363 - if (of_device_is_compatible(np, "ti,da850-aemif")) 366 + if (np && of_device_is_compatible(np, "ti,da850-aemif")) 364 367 aemif->cs_offset = 2; 368 + else if (pdata) 369 + aemif->cs_offset = pdata->cs_offset; 365 370 366 371 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 367 372 aemif->base = devm_ioremap_resource(dev, res); ··· 372 373 goto error; 373 374 } 374 375 375 - /* 376 - * For every controller device node, there is a cs device node that 377 - * describe the bus configuration parameters. This functions iterate 378 - * over these nodes and update the cs data array. 379 - */ 380 - for_each_available_child_of_node(np, child_np) { 381 - ret = of_aemif_parse_abus_config(pdev, child_np); 382 - if (ret < 0) 383 - goto error; 376 + if (np) { 377 + /* 378 + * For every controller device node, there is a cs device node 379 + * that describe the bus configuration parameters. This 380 + * functions iterate over these nodes and update the cs data 381 + * array. 382 + */ 383 + for_each_available_child_of_node(np, child_np) { 384 + ret = of_aemif_parse_abus_config(pdev, child_np); 385 + if (ret < 0) 386 + goto error; 387 + } 388 + } else if (pdata && pdata->num_abus_data > 0) { 389 + for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) { 390 + aemif->cs_data[i].cs = pdata->abus_data[i].cs; 391 + aemif_get_hw_params(pdev, i); 392 + } 384 393 } 385 394 386 395 for (i = 0; i < aemif->num_cs; i++) { ··· 401 394 } 402 395 403 396 /* 404 - * Create a child devices explicitly from here to 405 - * guarantee that the child will be probed after the AEMIF timing 406 - * parameters are set. 
397 + * Create a child devices explicitly from here to guarantee that the 398 + * child will be probed after the AEMIF timing parameters are set. 407 399 */ 408 - for_each_available_child_of_node(np, child_np) { 409 - ret = of_platform_populate(child_np, NULL, dev_lookup, dev); 410 - if (ret < 0) 411 - goto error; 400 + if (np) { 401 + for_each_available_child_of_node(np, child_np) { 402 + ret = of_platform_populate(child_np, NULL, 403 + dev_lookup, dev); 404 + if (ret < 0) 405 + goto error; 406 + } 407 + } else { 408 + for (i = 0; i < pdata->num_sub_devices; i++) { 409 + pdata->sub_devices[i].dev.parent = dev; 410 + ret = platform_device_register(&pdata->sub_devices[i]); 411 + if (ret) { 412 + dev_warn(dev, "Error register sub device %s\n", 413 + pdata->sub_devices[i].name); 414 + } 415 + } 412 416 } 413 417 414 418 return 0; ··· 440 422 .probe = aemif_probe, 441 423 .remove = aemif_remove, 442 424 .driver = { 443 - .name = KBUILD_MODNAME, 425 + .name = "ti-aemif", 444 426 .of_match_table = of_match_ptr(aemif_of_match), 445 427 }, 446 428 };
+12 -1
drivers/reset/reset-uniphier.c
··· 63 63 UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */ 64 64 UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ 65 65 UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ 66 + UNIPHIER_RESETX(28, 0x2000, 18), /* SATA0 */ 67 + UNIPHIER_RESETX(29, 0x2004, 18), /* SATA1 */ 68 + UNIPHIER_RESETX(30, 0x2000, 19), /* SATA-PHY */ 66 69 UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */ 67 70 UNIPHIER_RESET_END, 68 71 }; ··· 76 73 UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (PCIe, USB3) */ 77 74 UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */ 78 75 UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */ 76 + UNIPHIER_RESETX(24, 0x2008, 2), /* PCIe */ 79 77 UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */ 80 78 UNIPHIER_RESET_END, 81 79 }; ··· 93 89 UNIPHIER_RESETX(20, 0x2014, 5), /* USB31-PHY0 */ 94 90 UNIPHIER_RESETX(21, 0x2014, 1), /* USB31-PHY1 */ 95 91 UNIPHIER_RESETX(28, 0x2014, 12), /* SATA */ 96 - UNIPHIER_RESET(29, 0x2014, 8), /* SATA-PHY (active high) */ 92 + UNIPHIER_RESET(30, 0x2014, 8), /* SATA-PHY (active high) */ 97 93 UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */ 98 94 UNIPHIER_RESET_END, 99 95 }; ··· 103 99 UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ 104 100 UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */ 105 101 UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC, MIO) */ 102 + UNIPHIER_RESETX(9, 0x200c, 9), /* HSC */ 106 103 UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */ 107 104 UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */ 108 105 UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */ ··· 115 110 UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ 116 111 UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */ 117 112 UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */ 113 + UNIPHIER_RESETX(9, 0x200c, 9), /* HSC */ 118 114 UNIPHIER_RESETX(14, 0x200c, 5), /* USB30 */ 119 115 UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ 120 116 UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */ 121 117 UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */ 122 118 UNIPHIER_RESETX(19, 0x200c, 15), /* USB30-PHY3 */ 119 + UNIPHIER_RESETX(24, 0x200c, 4), /* PCIe */ 
123 120 UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */ 124 121 UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */ 125 122 UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */ ··· 141 134 UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */ 142 135 UNIPHIER_RESETX(20, 0x200c, 17), /* USB31-PHY0 */ 143 136 UNIPHIER_RESETX(21, 0x200c, 19), /* USB31-PHY1 */ 137 + UNIPHIER_RESETX(24, 0x200c, 3), /* PCIe */ 138 + UNIPHIER_RESETX(28, 0x200c, 7), /* SATA0 */ 139 + UNIPHIER_RESETX(29, 0x200c, 8), /* SATA1 */ 140 + UNIPHIER_RESETX(30, 0x200c, 21), /* SATA-PHY */ 144 141 UNIPHIER_RESET_END, 145 142 }; 146 143
+13 -5
drivers/soc/imx/gpc.c
··· 443 443 if (domain_index >= of_id_data->num_domains) 444 444 continue; 445 445 446 - domain = &imx_gpc_domains[domain_index]; 447 - domain->regmap = regmap; 448 - domain->ipg_rate_mhz = ipg_rate_mhz; 449 - 450 446 pd_pdev = platform_device_alloc("imx-pgc-power-domain", 451 447 domain_index); 452 448 if (!pd_pdev) { 453 449 of_node_put(np); 454 450 return -ENOMEM; 455 451 } 456 - pd_pdev->dev.platform_data = domain; 452 + 453 + ret = platform_device_add_data(pd_pdev, 454 + &imx_gpc_domains[domain_index], 455 + sizeof(imx_gpc_domains[domain_index])); 456 + if (ret) { 457 + platform_device_put(pd_pdev); 458 + of_node_put(np); 459 + return ret; 460 + } 461 + domain = pd_pdev->dev.platform_data; 462 + domain->regmap = regmap; 463 + domain->ipg_rate_mhz = ipg_rate_mhz; 464 + 457 465 pd_pdev->dev.parent = &pdev->dev; 458 466 pd_pdev->dev.of_node = np; 459 467
+15 -7
drivers/soc/imx/gpcv2.c
··· 155 155 return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false); 156 156 } 157 157 158 - static struct imx7_pgc_domain imx7_pgc_domains[] = { 158 + static const struct imx7_pgc_domain imx7_pgc_domains[] = { 159 159 [IMX7_POWER_DOMAIN_MIPI_PHY] = { 160 160 .genpd = { 161 161 .name = "mipi-phy", ··· 321 321 continue; 322 322 } 323 323 324 - domain = &imx7_pgc_domains[domain_index]; 325 - domain->regmap = regmap; 326 - domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; 327 - domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; 328 - 329 324 pd_pdev = platform_device_alloc("imx7-pgc-domain", 330 325 domain_index); 331 326 if (!pd_pdev) { ··· 329 334 return -ENOMEM; 330 335 } 331 336 332 - pd_pdev->dev.platform_data = domain; 337 + ret = platform_device_add_data(pd_pdev, 338 + &imx7_pgc_domains[domain_index], 339 + sizeof(imx7_pgc_domains[domain_index])); 340 + if (ret) { 341 + platform_device_put(pd_pdev); 342 + of_node_put(np); 343 + return ret; 344 + } 345 + 346 + domain = pd_pdev->dev.platform_data; 347 + domain->regmap = regmap; 348 + domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; 349 + domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; 350 + 333 351 pd_pdev->dev.parent = dev; 334 352 pd_pdev->dev.of_node = np; 335 353
+12 -34
drivers/soc/mediatek/mtk-infracfg.c
··· 17 17 #include <linux/soc/mediatek/infracfg.h> 18 18 #include <asm/processor.h> 19 19 20 + #define MTK_POLL_DELAY_US 10 21 + #define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ)) 22 + 20 23 #define INFRA_TOPAXI_PROTECTEN 0x0220 21 24 #define INFRA_TOPAXI_PROTECTSTA1 0x0228 22 25 #define INFRA_TOPAXI_PROTECTEN_SET 0x0260 ··· 40 37 int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask, 41 38 bool reg_update) 42 39 { 43 - unsigned long expired; 44 40 u32 val; 45 41 int ret; 46 42 ··· 49 47 else 50 48 regmap_write(infracfg, INFRA_TOPAXI_PROTECTEN_SET, mask); 51 49 52 - expired = jiffies + HZ; 50 + ret = regmap_read_poll_timeout(infracfg, INFRA_TOPAXI_PROTECTSTA1, 51 + val, (val & mask) == mask, 52 + MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT); 53 53 54 - while (1) { 55 - ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val); 56 - if (ret) 57 - return ret; 58 - 59 - if ((val & mask) == mask) 60 - break; 61 - 62 - cpu_relax(); 63 - if (time_after(jiffies, expired)) 64 - return -EIO; 65 - } 66 - 67 - return 0; 54 + return ret; 68 55 } 69 56 70 57 /** ··· 71 80 int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, 72 81 bool reg_update) 73 82 { 74 - unsigned long expired; 75 83 int ret; 84 + u32 val; 76 85 77 86 if (reg_update) 78 87 regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, 0); 79 88 else 80 89 regmap_write(infracfg, INFRA_TOPAXI_PROTECTEN_CLR, mask); 81 90 82 - expired = jiffies + HZ; 91 + ret = regmap_read_poll_timeout(infracfg, INFRA_TOPAXI_PROTECTSTA1, 92 + val, !(val & mask), 93 + MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT); 83 94 84 - while (1) { 85 - u32 val; 86 - 87 - ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val); 88 - if (ret) 89 - return ret; 90 - 91 - if (!(val & mask)) 92 - break; 93 - 94 - cpu_relax(); 95 - if (time_after(jiffies, expired)) 96 - return -EIO; 97 - } 98 - 99 - return 0; 95 + return ret; 100 96 }
+3 -10
drivers/soc/mediatek/mtk-pmic-wrap.c
··· 1458 1458 int ret, irq; 1459 1459 struct pmic_wrapper *wrp; 1460 1460 struct device_node *np = pdev->dev.of_node; 1461 - const struct of_device_id *of_id = 1462 - of_match_device(of_pwrap_match_tbl, &pdev->dev); 1463 1461 const struct of_device_id *of_slave_id = NULL; 1464 1462 struct resource *res; 1465 1463 1466 - if (!of_id) { 1467 - dev_err(&pdev->dev, "Error: No device match found\n"); 1468 - return -ENODEV; 1469 - } 1464 + if (np->child) 1465 + of_slave_id = of_match_node(of_slave_match_tbl, np->child); 1470 1466 1471 - if (pdev->dev.of_node->child) 1472 - of_slave_id = of_match_node(of_slave_match_tbl, 1473 - pdev->dev.of_node->child); 1474 1467 if (!of_slave_id) { 1475 1468 dev_dbg(&pdev->dev, "slave pmic should be defined in dts\n"); 1476 1469 return -EINVAL; ··· 1475 1482 1476 1483 platform_set_drvdata(pdev, wrp); 1477 1484 1478 - wrp->master = of_id->data; 1485 + wrp->master = of_device_get_match_data(&pdev->dev); 1479 1486 wrp->slave = of_slave_id->data; 1480 1487 wrp->dev = &pdev->dev; 1481 1488
+67 -100
drivers/soc/mediatek/mtk-scpsys.c
··· 13 13 #include <linux/clk.h> 14 14 #include <linux/init.h> 15 15 #include <linux/io.h> 16 + #include <linux/iopoll.h> 16 17 #include <linux/mfd/syscon.h> 17 18 #include <linux/of_device.h> 18 19 #include <linux/platform_device.h> ··· 27 26 #include <dt-bindings/power/mt7622-power.h> 28 27 #include <dt-bindings/power/mt7623a-power.h> 29 28 #include <dt-bindings/power/mt8173-power.h> 29 + 30 + #define MTK_POLL_DELAY_US 10 31 + #define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ)) 32 + 33 + #define MTK_SCPD_ACTIVE_WAKEUP BIT(0) 34 + #define MTK_SCPD_FWAIT_SRAM BIT(1) 35 + #define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x)) 30 36 31 37 #define SPM_VDE_PWR_CON 0x0210 32 38 #define SPM_MFG_PWR_CON 0x0214 ··· 124 116 u32 sram_pdn_ack_bits; 125 117 u32 bus_prot_mask; 126 118 enum clk_id clk_id[MAX_CLKS]; 127 - bool active_wakeup; 119 + u8 caps; 128 120 }; 129 121 130 122 struct scp; ··· 192 184 { 193 185 struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd); 194 186 struct scp *scp = scpd->scp; 195 - unsigned long timeout; 196 - bool expired; 197 187 void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs; 198 - u32 sram_pdn_ack = scpd->data->sram_pdn_ack_bits; 188 + u32 pdn_ack = scpd->data->sram_pdn_ack_bits; 199 189 u32 val; 200 - int ret; 190 + int ret, tmp; 201 191 int i; 202 192 203 193 if (scpd->supply) { ··· 221 215 writel(val, ctl_addr); 222 216 223 217 /* wait until PWR_ACK = 1 */ 224 - timeout = jiffies + HZ; 225 - expired = false; 226 - while (1) { 227 - ret = scpsys_domain_is_on(scpd); 228 - if (ret > 0) 229 - break; 230 - 231 - if (expired) { 232 - ret = -ETIMEDOUT; 233 - goto err_pwr_ack; 234 - } 235 - 236 - cpu_relax(); 237 - 238 - if (time_after(jiffies, timeout)) 239 - expired = true; 240 - } 218 + ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp > 0, 219 + MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT); 220 + if (ret < 0) 221 + goto err_pwr_ack; 241 222 242 223 val &= ~PWR_CLK_DIS_BIT; 243 224 writel(val, ctl_addr); ··· 238 
245 val &= ~scpd->data->sram_pdn_bits; 239 246 writel(val, ctl_addr); 240 247 241 - /* wait until SRAM_PDN_ACK all 0 */ 242 - timeout = jiffies + HZ; 243 - expired = false; 244 - while (sram_pdn_ack && (readl(ctl_addr) & sram_pdn_ack)) { 248 + /* Either wait until SRAM_PDN_ACK all 0 or have a force wait */ 249 + if (MTK_SCPD_CAPS(scpd, MTK_SCPD_FWAIT_SRAM)) { 250 + /* 251 + * Currently, MTK_SCPD_FWAIT_SRAM is necessary only for 252 + * MT7622_POWER_DOMAIN_WB and thus just a trivial setup is 253 + * applied here. 254 + */ 255 + usleep_range(12000, 12100); 245 256 246 - if (expired) { 247 - ret = -ETIMEDOUT; 257 + } else { 258 + ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == 0, 259 + MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT); 260 + if (ret < 0) 248 261 goto err_pwr_ack; 249 - } 250 - 251 - cpu_relax(); 252 - 253 - if (time_after(jiffies, timeout)) 254 - expired = true; 255 262 } 256 263 257 264 if (scpd->data->bus_prot_mask) { ··· 282 289 { 283 290 struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd); 284 291 struct scp *scp = scpd->scp; 285 - unsigned long timeout; 286 - bool expired; 287 292 void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs; 288 293 u32 pdn_ack = scpd->data->sram_pdn_ack_bits; 289 294 u32 val; 290 - int ret; 295 + int ret, tmp; 291 296 int i; 292 297 293 298 if (scpd->data->bus_prot_mask) { ··· 301 310 writel(val, ctl_addr); 302 311 303 312 /* wait until SRAM_PDN_ACK all 1 */ 304 - timeout = jiffies + HZ; 305 - expired = false; 306 - while (pdn_ack && (readl(ctl_addr) & pdn_ack) != pdn_ack) { 307 - if (expired) { 308 - ret = -ETIMEDOUT; 309 - goto out; 310 - } 311 - 312 - cpu_relax(); 313 - 314 - if (time_after(jiffies, timeout)) 315 - expired = true; 316 - } 313 + ret = readl_poll_timeout(ctl_addr, tmp, (tmp & pdn_ack) == pdn_ack, 314 + MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT); 315 + if (ret < 0) 316 + goto out; 317 317 318 318 val |= PWR_ISO_BIT; 319 319 writel(val, ctl_addr); ··· 322 340 writel(val, ctl_addr); 323 
341 324 342 /* wait until PWR_ACK = 0 */ 325 - timeout = jiffies + HZ; 326 - expired = false; 327 - while (1) { 328 - ret = scpsys_domain_is_on(scpd); 329 - if (ret == 0) 330 - break; 331 - 332 - if (expired) { 333 - ret = -ETIMEDOUT; 334 - goto out; 335 - } 336 - 337 - cpu_relax(); 338 - 339 - if (time_after(jiffies, timeout)) 340 - expired = true; 341 - } 343 + ret = readx_poll_timeout(scpsys_domain_is_on, scpd, tmp, tmp == 0, 344 + MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT); 345 + if (ret < 0) 346 + goto out; 342 347 343 348 for (i = 0; i < MAX_CLKS && scpd->clk[i]; i++) 344 349 clk_disable_unprepare(scpd->clk[i]); ··· 438 469 genpd->name = data->name; 439 470 genpd->power_off = scpsys_power_off; 440 471 genpd->power_on = scpsys_power_on; 441 - if (scpd->data->active_wakeup) 472 + if (MTK_SCPD_CAPS(scpd, MTK_SCPD_ACTIVE_WAKEUP)) 442 473 genpd->flags |= GENPD_FLAG_ACTIVE_WAKEUP; 443 474 } 444 475 ··· 491 522 .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M | 492 523 MT2701_TOP_AXI_PROT_EN_CONN_S, 493 524 .clk_id = {CLK_NONE}, 494 - .active_wakeup = true, 525 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 495 526 }, 496 527 [MT2701_POWER_DOMAIN_DISP] = { 497 528 .name = "disp", ··· 500 531 .sram_pdn_bits = GENMASK(11, 8), 501 532 .clk_id = {CLK_MM}, 502 533 .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_MM_M0, 503 - .active_wakeup = true, 534 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 504 535 }, 505 536 [MT2701_POWER_DOMAIN_MFG] = { 506 537 .name = "mfg", ··· 509 540 .sram_pdn_bits = GENMASK(11, 8), 510 541 .sram_pdn_ack_bits = GENMASK(12, 12), 511 542 .clk_id = {CLK_MFG}, 512 - .active_wakeup = true, 543 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 513 544 }, 514 545 [MT2701_POWER_DOMAIN_VDEC] = { 515 546 .name = "vdec", ··· 518 549 .sram_pdn_bits = GENMASK(11, 8), 519 550 .sram_pdn_ack_bits = GENMASK(12, 12), 520 551 .clk_id = {CLK_MM}, 521 - .active_wakeup = true, 552 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 522 553 }, 523 554 [MT2701_POWER_DOMAIN_ISP] = { 524 555 .name = "isp", ··· 527 558 .sram_pdn_bits = 
GENMASK(11, 8), 528 559 .sram_pdn_ack_bits = GENMASK(13, 12), 529 560 .clk_id = {CLK_MM}, 530 - .active_wakeup = true, 561 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 531 562 }, 532 563 [MT2701_POWER_DOMAIN_BDP] = { 533 564 .name = "bdp", ··· 535 566 .ctl_offs = SPM_BDP_PWR_CON, 536 567 .sram_pdn_bits = GENMASK(11, 8), 537 568 .clk_id = {CLK_NONE}, 538 - .active_wakeup = true, 569 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 539 570 }, 540 571 [MT2701_POWER_DOMAIN_ETH] = { 541 572 .name = "eth", ··· 544 575 .sram_pdn_bits = GENMASK(11, 8), 545 576 .sram_pdn_ack_bits = GENMASK(15, 12), 546 577 .clk_id = {CLK_ETHIF}, 547 - .active_wakeup = true, 578 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 548 579 }, 549 580 [MT2701_POWER_DOMAIN_HIF] = { 550 581 .name = "hif", ··· 553 584 .sram_pdn_bits = GENMASK(11, 8), 554 585 .sram_pdn_ack_bits = GENMASK(15, 12), 555 586 .clk_id = {CLK_ETHIF}, 556 - .active_wakeup = true, 587 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 557 588 }, 558 589 [MT2701_POWER_DOMAIN_IFR_MSC] = { 559 590 .name = "ifr_msc", 560 591 .sta_mask = PWR_STATUS_IFR_MSC, 561 592 .ctl_offs = SPM_IFR_MSC_PWR_CON, 562 593 .clk_id = {CLK_NONE}, 563 - .active_wakeup = true, 594 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 564 595 }, 565 596 }; 566 597 ··· 575 606 .sram_pdn_bits = GENMASK(8, 8), 576 607 .sram_pdn_ack_bits = GENMASK(12, 12), 577 608 .clk_id = {CLK_MM}, 578 - .active_wakeup = true, 609 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 579 610 }, 580 611 [MT2712_POWER_DOMAIN_VDEC] = { 581 612 .name = "vdec", ··· 584 615 .sram_pdn_bits = GENMASK(8, 8), 585 616 .sram_pdn_ack_bits = GENMASK(12, 12), 586 617 .clk_id = {CLK_MM, CLK_VDEC}, 587 - .active_wakeup = true, 618 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 588 619 }, 589 620 [MT2712_POWER_DOMAIN_VENC] = { 590 621 .name = "venc", ··· 593 624 .sram_pdn_bits = GENMASK(11, 8), 594 625 .sram_pdn_ack_bits = GENMASK(15, 12), 595 626 .clk_id = {CLK_MM, CLK_VENC, CLK_JPGDEC}, 596 - .active_wakeup = true, 627 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 597 628 }, 598 629 [MT2712_POWER_DOMAIN_ISP] 
= { 599 630 .name = "isp", ··· 602 633 .sram_pdn_bits = GENMASK(11, 8), 603 634 .sram_pdn_ack_bits = GENMASK(13, 12), 604 635 .clk_id = {CLK_MM}, 605 - .active_wakeup = true, 636 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 606 637 }, 607 638 [MT2712_POWER_DOMAIN_AUDIO] = { 608 639 .name = "audio", ··· 611 642 .sram_pdn_bits = GENMASK(11, 8), 612 643 .sram_pdn_ack_bits = GENMASK(15, 12), 613 644 .clk_id = {CLK_AUDIO}, 614 - .active_wakeup = true, 645 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 615 646 }, 616 647 [MT2712_POWER_DOMAIN_USB] = { 617 648 .name = "usb", ··· 620 651 .sram_pdn_bits = GENMASK(10, 8), 621 652 .sram_pdn_ack_bits = GENMASK(14, 12), 622 653 .clk_id = {CLK_NONE}, 623 - .active_wakeup = true, 654 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 624 655 }, 625 656 [MT2712_POWER_DOMAIN_USB2] = { 626 657 .name = "usb2", ··· 629 660 .sram_pdn_bits = GENMASK(10, 8), 630 661 .sram_pdn_ack_bits = GENMASK(14, 12), 631 662 .clk_id = {CLK_NONE}, 632 - .active_wakeup = true, 663 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 633 664 }, 634 665 [MT2712_POWER_DOMAIN_MFG] = { 635 666 .name = "mfg", ··· 639 670 .sram_pdn_ack_bits = GENMASK(16, 16), 640 671 .clk_id = {CLK_MFG}, 641 672 .bus_prot_mask = BIT(14) | BIT(21) | BIT(23), 642 - .active_wakeup = true, 673 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 643 674 }, 644 675 [MT2712_POWER_DOMAIN_MFG_SC1] = { 645 676 .name = "mfg_sc1", ··· 648 679 .sram_pdn_bits = GENMASK(8, 8), 649 680 .sram_pdn_ack_bits = GENMASK(16, 16), 650 681 .clk_id = {CLK_NONE}, 651 - .active_wakeup = true, 682 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 652 683 }, 653 684 [MT2712_POWER_DOMAIN_MFG_SC2] = { 654 685 .name = "mfg_sc2", ··· 657 688 .sram_pdn_bits = GENMASK(8, 8), 658 689 .sram_pdn_ack_bits = GENMASK(16, 16), 659 690 .clk_id = {CLK_NONE}, 660 - .active_wakeup = true, 691 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 661 692 }, 662 693 [MT2712_POWER_DOMAIN_MFG_SC3] = { 663 694 .name = "mfg_sc3", ··· 666 697 .sram_pdn_bits = GENMASK(8, 8), 667 698 .sram_pdn_ack_bits = GENMASK(16, 16), 668 699 .clk_id = 
{CLK_NONE}, 669 - .active_wakeup = true, 700 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 670 701 }, 671 702 }; 672 703 ··· 766 797 .sram_pdn_ack_bits = GENMASK(15, 12), 767 798 .clk_id = {CLK_NONE}, 768 799 .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_ETHSYS, 769 - .active_wakeup = true, 800 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 770 801 }, 771 802 [MT7622_POWER_DOMAIN_HIF0] = { 772 803 .name = "hif0", ··· 776 807 .sram_pdn_ack_bits = GENMASK(15, 12), 777 808 .clk_id = {CLK_HIFSEL}, 778 809 .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF0, 779 - .active_wakeup = true, 810 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 780 811 }, 781 812 [MT7622_POWER_DOMAIN_HIF1] = { 782 813 .name = "hif1", ··· 786 817 .sram_pdn_ack_bits = GENMASK(15, 12), 787 818 .clk_id = {CLK_HIFSEL}, 788 819 .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_HIF1, 789 - .active_wakeup = true, 820 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 790 821 }, 791 822 [MT7622_POWER_DOMAIN_WB] = { 792 823 .name = "wb", ··· 796 827 .sram_pdn_ack_bits = 0, 797 828 .clk_id = {CLK_NONE}, 798 829 .bus_prot_mask = MT7622_TOP_AXI_PROT_EN_WB, 799 - .active_wakeup = true, 830 + .caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_FWAIT_SRAM, 800 831 }, 801 832 }; 802 833 ··· 812 843 .bus_prot_mask = MT2701_TOP_AXI_PROT_EN_CONN_M | 813 844 MT2701_TOP_AXI_PROT_EN_CONN_S, 814 845 .clk_id = {CLK_NONE}, 815 - .active_wakeup = true, 846 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 816 847 }, 817 848 [MT7623A_POWER_DOMAIN_ETH] = { 818 849 .name = "eth", ··· 821 852 .sram_pdn_bits = GENMASK(11, 8), 822 853 .sram_pdn_ack_bits = GENMASK(15, 12), 823 854 .clk_id = {CLK_ETHIF}, 824 - .active_wakeup = true, 855 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 825 856 }, 826 857 [MT7623A_POWER_DOMAIN_HIF] = { 827 858 .name = "hif", ··· 830 861 .sram_pdn_bits = GENMASK(11, 8), 831 862 .sram_pdn_ack_bits = GENMASK(15, 12), 832 863 .clk_id = {CLK_ETHIF}, 833 - .active_wakeup = true, 864 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 834 865 }, 835 866 [MT7623A_POWER_DOMAIN_IFR_MSC] = { 836 867 .name = "ifr_msc", 837 868 .sta_mask = 
PWR_STATUS_IFR_MSC, 838 869 .ctl_offs = SPM_IFR_MSC_PWR_CON, 839 870 .clk_id = {CLK_NONE}, 840 - .active_wakeup = true, 871 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 841 872 }, 842 873 }; 843 874 ··· 903 934 .sram_pdn_bits = GENMASK(11, 8), 904 935 .sram_pdn_ack_bits = GENMASK(15, 12), 905 936 .clk_id = {CLK_NONE}, 906 - .active_wakeup = true, 937 + .caps = MTK_SCPD_ACTIVE_WAKEUP, 907 938 }, 908 939 [MT8173_POWER_DOMAIN_MFG_ASYNC] = { 909 940 .name = "mfg_async", ··· 1036 1067 1037 1068 static int scpsys_probe(struct platform_device *pdev) 1038 1069 { 1039 - const struct of_device_id *match; 1040 1070 const struct scp_subdomain *sd; 1041 1071 const struct scp_soc_data *soc; 1042 1072 struct scp *scp; 1043 1073 struct genpd_onecell_data *pd_data; 1044 1074 int i, ret; 1045 1075 1046 - match = of_match_device(of_scpsys_match_tbl, &pdev->dev); 1047 - soc = (const struct scp_soc_data *)match->data; 1076 + soc = of_device_get_match_data(&pdev->dev); 1048 1077 1049 1078 scp = init_scp(pdev, soc->domains, soc->num_domains, &soc->regs, 1050 1079 soc->bus_prot_reg_update);
+116 -1
drivers/soc/rockchip/pm_domains.c
··· 19 19 #include <linux/clk.h> 20 20 #include <linux/regmap.h> 21 21 #include <linux/mfd/syscon.h> 22 + #include <dt-bindings/power/px30-power.h> 23 + #include <dt-bindings/power/rk3036-power.h> 24 + #include <dt-bindings/power/rk3128-power.h> 25 + #include <dt-bindings/power/rk3228-power.h> 22 26 #include <dt-bindings/power/rk3288-power.h> 23 27 #include <dt-bindings/power/rk3328-power.h> 24 28 #include <dt-bindings/power/rk3366-power.h> ··· 107 103 .ack_mask = (ack >= 0) ? BIT(ack) : 0, \ 108 104 .active_wakeup = wakeup, \ 109 105 } 106 + 107 + #define DOMAIN_RK3036(req, ack, idle, wakeup) \ 108 + { \ 109 + .req_mask = (req >= 0) ? BIT(req) : 0, \ 110 + .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \ 111 + .ack_mask = (ack >= 0) ? BIT(ack) : 0, \ 112 + .idle_mask = (idle >= 0) ? BIT(idle) : 0, \ 113 + .active_wakeup = wakeup, \ 114 + } 115 + 116 + #define DOMAIN_PX30(pwr, status, req, wakeup) \ 117 + DOMAIN_M(pwr, status, req, (req) + 16, req, wakeup) 110 118 111 119 #define DOMAIN_RK3288(pwr, status, req, wakeup) \ 112 120 DOMAIN(pwr, status, req, req, (req) + 16, wakeup) ··· 272 256 return; 273 257 else if (pd->info->pwr_w_mask) 274 258 regmap_write(pmu->regmap, pmu->info->pwr_offset, 275 - on ? pd->info->pwr_mask : 259 + on ? 
pd->info->pwr_w_mask : 276 260 (pd->info->pwr_mask | pd->info->pwr_w_mask)); 277 261 else 278 262 regmap_update_bits(pmu->regmap, pmu->info->pwr_offset, ··· 716 700 return error; 717 701 } 718 702 703 + static const struct rockchip_domain_info px30_pm_domains[] = { 704 + [PX30_PD_USB] = DOMAIN_PX30(5, 5, 10, false), 705 + [PX30_PD_SDCARD] = DOMAIN_PX30(8, 8, 9, false), 706 + [PX30_PD_GMAC] = DOMAIN_PX30(10, 10, 6, false), 707 + [PX30_PD_MMC_NAND] = DOMAIN_PX30(11, 11, 5, false), 708 + [PX30_PD_VPU] = DOMAIN_PX30(12, 12, 14, false), 709 + [PX30_PD_VO] = DOMAIN_PX30(13, 13, 7, false), 710 + [PX30_PD_VI] = DOMAIN_PX30(14, 14, 8, false), 711 + [PX30_PD_GPU] = DOMAIN_PX30(15, 15, 2, false), 712 + }; 713 + 714 + static const struct rockchip_domain_info rk3036_pm_domains[] = { 715 + [RK3036_PD_MSCH] = DOMAIN_RK3036(14, 23, 30, true), 716 + [RK3036_PD_CORE] = DOMAIN_RK3036(13, 17, 24, false), 717 + [RK3036_PD_PERI] = DOMAIN_RK3036(12, 18, 25, false), 718 + [RK3036_PD_VIO] = DOMAIN_RK3036(11, 19, 26, false), 719 + [RK3036_PD_VPU] = DOMAIN_RK3036(10, 20, 27, false), 720 + [RK3036_PD_GPU] = DOMAIN_RK3036(9, 21, 28, false), 721 + [RK3036_PD_SYS] = DOMAIN_RK3036(8, 22, 29, false), 722 + }; 723 + 724 + static const struct rockchip_domain_info rk3128_pm_domains[] = { 725 + [RK3128_PD_CORE] = DOMAIN_RK3288(0, 0, 4, false), 726 + [RK3128_PD_MSCH] = DOMAIN_RK3288(-1, -1, 6, true), 727 + [RK3128_PD_VIO] = DOMAIN_RK3288(3, 3, 2, false), 728 + [RK3128_PD_VIDEO] = DOMAIN_RK3288(2, 2, 1, false), 729 + [RK3128_PD_GPU] = DOMAIN_RK3288(1, 1, 3, false), 730 + }; 731 + 732 + static const struct rockchip_domain_info rk3228_pm_domains[] = { 733 + [RK3228_PD_CORE] = DOMAIN_RK3036(0, 0, 16, true), 734 + [RK3228_PD_MSCH] = DOMAIN_RK3036(1, 1, 17, true), 735 + [RK3228_PD_BUS] = DOMAIN_RK3036(2, 2, 18, true), 736 + [RK3228_PD_SYS] = DOMAIN_RK3036(3, 3, 19, true), 737 + [RK3228_PD_VIO] = DOMAIN_RK3036(4, 4, 20, false), 738 + [RK3228_PD_VOP] = DOMAIN_RK3036(5, 5, 21, false), 739 + [RK3228_PD_VPU] = 
DOMAIN_RK3036(6, 6, 22, false), 740 + [RK3228_PD_RKVDEC] = DOMAIN_RK3036(7, 7, 23, false), 741 + [RK3228_PD_GPU] = DOMAIN_RK3036(8, 8, 24, false), 742 + [RK3228_PD_PERI] = DOMAIN_RK3036(9, 9, 25, true), 743 + [RK3228_PD_GMAC] = DOMAIN_RK3036(10, 10, 26, false), 744 + }; 745 + 719 746 static const struct rockchip_domain_info rk3288_pm_domains[] = { 720 747 [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false), 721 748 [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false), ··· 824 765 [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true), 825 766 [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true), 826 767 [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true), 768 + }; 769 + 770 + static const struct rockchip_pmu_info px30_pmu = { 771 + .pwr_offset = 0x18, 772 + .status_offset = 0x20, 773 + .req_offset = 0x64, 774 + .idle_offset = 0x6c, 775 + .ack_offset = 0x6c, 776 + 777 + .num_domains = ARRAY_SIZE(px30_pm_domains), 778 + .domain_info = px30_pm_domains, 779 + }; 780 + 781 + static const struct rockchip_pmu_info rk3036_pmu = { 782 + .req_offset = 0x148, 783 + .idle_offset = 0x14c, 784 + .ack_offset = 0x14c, 785 + 786 + .num_domains = ARRAY_SIZE(rk3036_pm_domains), 787 + .domain_info = rk3036_pm_domains, 788 + }; 789 + 790 + static const struct rockchip_pmu_info rk3128_pmu = { 791 + .pwr_offset = 0x04, 792 + .status_offset = 0x08, 793 + .req_offset = 0x0c, 794 + .idle_offset = 0x10, 795 + .ack_offset = 0x10, 796 + 797 + .num_domains = ARRAY_SIZE(rk3128_pm_domains), 798 + .domain_info = rk3128_pm_domains, 799 + }; 800 + 801 + static const struct rockchip_pmu_info rk3228_pmu = { 802 + .req_offset = 0x40c, 803 + .idle_offset = 0x488, 804 + .ack_offset = 0x488, 805 + 806 + .num_domains = ARRAY_SIZE(rk3228_pm_domains), 807 + .domain_info = rk3228_pm_domains, 827 808 }; 828 809 829 810 static const struct rockchip_pmu_info rk3288_pmu = { ··· 940 841 }; 941 842 942 843 static const struct of_device_id rockchip_pm_domain_dt_match[] = { 844 + { 845 + .compatible = 
"rockchip,px30-power-controller", 846 + .data = (void *)&px30_pmu, 847 + }, 848 + { 849 + .compatible = "rockchip,rk3036-power-controller", 850 + .data = (void *)&rk3036_pmu, 851 + }, 852 + { 853 + .compatible = "rockchip,rk3128-power-controller", 854 + .data = (void *)&rk3128_pmu, 855 + }, 856 + { 857 + .compatible = "rockchip,rk3228-power-controller", 858 + .data = (void *)&rk3228_pmu, 859 + }, 943 860 { 944 861 .compatible = "rockchip,rk3288-power-controller", 945 862 .data = (void *)&rk3288_pmu,
+1 -89
drivers/soc/samsung/pm_domains.c
··· 13 13 #include <linux/err.h> 14 14 #include <linux/slab.h> 15 15 #include <linux/pm_domain.h> 16 - #include <linux/clk.h> 17 16 #include <linux/delay.h> 18 17 #include <linux/of_address.h> 19 18 #include <linux/of_platform.h> 20 19 #include <linux/sched.h> 21 - 22 - #define MAX_CLK_PER_DOMAIN 4 23 20 24 21 struct exynos_pm_domain_config { 25 22 /* Value for LOCAL_PWR_CFG and STATUS fields for each domain */ ··· 30 33 void __iomem *base; 31 34 bool is_off; 32 35 struct generic_pm_domain pd; 33 - struct clk *oscclk; 34 - struct clk *clk[MAX_CLK_PER_DOMAIN]; 35 - struct clk *pclk[MAX_CLK_PER_DOMAIN]; 36 - struct clk *asb_clk[MAX_CLK_PER_DOMAIN]; 37 36 u32 local_pwr_cfg; 38 37 }; 39 38 ··· 39 46 void __iomem *base; 40 47 u32 timeout, pwr; 41 48 char *op; 42 - int i; 43 49 44 50 pd = container_of(domain, struct exynos_pm_domain, pd); 45 51 base = pd->base; 46 - 47 - for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { 48 - if (IS_ERR(pd->asb_clk[i])) 49 - break; 50 - clk_prepare_enable(pd->asb_clk[i]); 51 - } 52 - 53 - /* Set oscclk before powering off a domain*/ 54 - if (!power_on) { 55 - for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { 56 - if (IS_ERR(pd->clk[i])) 57 - break; 58 - pd->pclk[i] = clk_get_parent(pd->clk[i]); 59 - if (clk_set_parent(pd->clk[i], pd->oscclk)) 60 - pr_err("%s: error setting oscclk as parent to clock %d\n", 61 - domain->name, i); 62 - } 63 - } 64 52 65 53 pwr = power_on ? 
pd->local_pwr_cfg : 0; 66 54 writel_relaxed(pwr, base); ··· 58 84 timeout--; 59 85 cpu_relax(); 60 86 usleep_range(80, 100); 61 - } 62 - 63 - /* Restore clocks after powering on a domain*/ 64 - if (power_on) { 65 - for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { 66 - if (IS_ERR(pd->clk[i])) 67 - break; 68 - 69 - if (IS_ERR(pd->pclk[i])) 70 - continue; /* Skip on first power up */ 71 - if (clk_set_parent(pd->clk[i], pd->pclk[i])) 72 - pr_err("%s: error setting parent to clock%d\n", 73 - domain->name, i); 74 - } 75 - } 76 - 77 - for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { 78 - if (IS_ERR(pd->asb_clk[i])) 79 - break; 80 - clk_disable_unprepare(pd->asb_clk[i]); 81 87 } 82 88 83 89 return 0; ··· 101 147 return kstrdup_const(name, GFP_KERNEL); 102 148 } 103 149 104 - static const char *soc_force_no_clk[] = { 105 - "samsung,exynos5250-clock", 106 - "samsung,exynos5420-clock", 107 - "samsung,exynos5800-clock", 108 - }; 109 - 110 150 static __init int exynos4_pm_init_power_domain(void) 111 151 { 112 152 struct device_node *np; ··· 109 161 for_each_matching_node_and_match(np, exynos_pm_domain_of_match, &match) { 110 162 const struct exynos_pm_domain_config *pm_domain_cfg; 111 163 struct exynos_pm_domain *pd; 112 - int on, i; 164 + int on; 113 165 114 166 pm_domain_cfg = match->data; 115 167 ··· 137 189 pd->pd.power_on = exynos_pd_power_on; 138 190 pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg; 139 191 140 - for (i = 0; i < ARRAY_SIZE(soc_force_no_clk); i++) 141 - if (of_find_compatible_node(NULL, NULL, 142 - soc_force_no_clk[i])) 143 - goto no_clk; 144 - 145 - for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { 146 - char clk_name[8]; 147 - 148 - snprintf(clk_name, sizeof(clk_name), "asb%d", i); 149 - pd->asb_clk[i] = of_clk_get_by_name(np, clk_name); 150 - if (IS_ERR(pd->asb_clk[i])) 151 - break; 152 - } 153 - 154 - pd->oscclk = of_clk_get_by_name(np, "oscclk"); 155 - if (IS_ERR(pd->oscclk)) 156 - goto no_clk; 157 - 158 - for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) { 159 - char 
clk_name[8]; 160 - 161 - snprintf(clk_name, sizeof(clk_name), "clk%d", i); 162 - pd->clk[i] = of_clk_get_by_name(np, clk_name); 163 - if (IS_ERR(pd->clk[i])) 164 - break; 165 - /* 166 - * Skip setting parent on first power up. 167 - * The parent at this time may not be useful at all. 168 - */ 169 - pd->pclk[i] = ERR_PTR(-EINVAL); 170 - } 171 - 172 - if (IS_ERR(pd->clk[0])) 173 - clk_put(pd->oscclk); 174 - 175 - no_clk: 176 192 on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg; 177 193 178 194 pm_genpd_init(&pd->pd, NULL, !on);
+8 -6
drivers/soc/ti/knav_qmss.h
··· 19 19 #ifndef __KNAV_QMSS_H__ 20 20 #define __KNAV_QMSS_H__ 21 21 22 + #include <linux/percpu.h> 23 + 22 24 #define THRESH_GTE BIT(7) 23 25 #define THRESH_LT 0 24 26 ··· 164 162 * notifies: notifier counts 165 163 */ 166 164 struct knav_queue_stats { 167 - atomic_t pushes; 168 - atomic_t pops; 169 - atomic_t push_errors; 170 - atomic_t pop_errors; 171 - atomic_t notifies; 165 + unsigned int pushes; 166 + unsigned int pops; 167 + unsigned int push_errors; 168 + unsigned int pop_errors; 169 + unsigned int notifies; 172 170 }; 173 171 174 172 /** ··· 285 283 struct knav_queue { 286 284 struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek; 287 285 struct knav_queue_inst *inst; 288 - struct knav_queue_stats stats; 286 + struct knav_queue_stats __percpu *stats; 289 287 knav_queue_notify_fn notifier_fn; 290 288 void *notifier_fn_arg; 291 289 atomic_t notifier_enabled;
+40 -20
drivers/soc/ti/knav_qmss_queue.c
··· 99 99 continue; 100 100 if (WARN_ON(!qh->notifier_fn)) 101 101 continue; 102 - atomic_inc(&qh->stats.notifies); 102 + this_cpu_inc(qh->stats->notifies); 103 103 qh->notifier_fn(qh->notifier_fn_arg); 104 104 } 105 105 rcu_read_unlock(); ··· 230 230 if (!qh) 231 231 return ERR_PTR(-ENOMEM); 232 232 233 + qh->stats = alloc_percpu(struct knav_queue_stats); 234 + if (!qh->stats) { 235 + ret = -ENOMEM; 236 + goto err; 237 + } 238 + 233 239 qh->flags = flags; 234 240 qh->inst = inst; 235 241 id = inst->id - inst->qmgr->start_queue; ··· 251 245 if (range->ops && range->ops->open_queue) 252 246 ret = range->ops->open_queue(range, inst, flags); 253 247 254 - if (ret) { 255 - devm_kfree(inst->kdev->dev, qh); 256 - return ERR_PTR(ret); 257 - } 248 + if (ret) 249 + goto err; 258 250 } 259 251 list_add_tail_rcu(&qh->list, &inst->handles); 260 252 return qh; 253 + 254 + err: 255 + if (qh->stats) 256 + free_percpu(qh->stats); 257 + devm_kfree(inst->kdev->dev, qh); 258 + return ERR_PTR(ret); 261 259 } 262 260 263 261 static struct knav_queue * ··· 437 427 { 438 428 struct knav_device *kdev = inst->kdev; 439 429 struct knav_queue *qh; 430 + int cpu = 0; 431 + int pushes = 0; 432 + int pops = 0; 433 + int push_errors = 0; 434 + int pop_errors = 0; 435 + int notifies = 0; 440 436 441 437 if (!knav_queue_is_busy(inst)) 442 438 return; ··· 450 434 seq_printf(s, "\tqueue id %d (%s)\n", 451 435 kdev->base_id + inst->id, inst->name); 452 436 for_each_handle_rcu(qh, inst) { 453 - seq_printf(s, "\t\thandle %p: ", qh); 454 - seq_printf(s, "pushes %8d, ", 455 - atomic_read(&qh->stats.pushes)); 456 - seq_printf(s, "pops %8d, ", 457 - atomic_read(&qh->stats.pops)); 458 - seq_printf(s, "count %8d, ", 459 - knav_queue_get_count(qh)); 460 - seq_printf(s, "notifies %8d, ", 461 - atomic_read(&qh->stats.notifies)); 462 - seq_printf(s, "push errors %8d, ", 463 - atomic_read(&qh->stats.push_errors)); 464 - seq_printf(s, "pop errors %8d\n", 465 - atomic_read(&qh->stats.pop_errors)); 437 + 
for_each_possible_cpu(cpu) { 438 + pushes += per_cpu_ptr(qh->stats, cpu)->pushes; 439 + pops += per_cpu_ptr(qh->stats, cpu)->pops; 440 + push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors; 441 + pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors; 442 + notifies += per_cpu_ptr(qh->stats, cpu)->notifies; 443 + } 444 + 445 + seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n", 446 + qh, 447 + pushes, 448 + pops, 449 + knav_queue_get_count(qh), 450 + notifies, 451 + push_errors, 452 + pop_errors); 466 453 } 467 454 } 468 455 ··· 582 563 if (range->ops && range->ops->close_queue) 583 564 range->ops->close_queue(range, inst); 584 565 } 566 + free_percpu(qh->stats); 585 567 devm_kfree(inst->kdev->dev, qh); 586 568 } 587 569 EXPORT_SYMBOL_GPL(knav_queue_close); ··· 656 636 val = (u32)dma | ((size / 16) - 1); 657 637 writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh); 658 638 659 - atomic_inc(&qh->stats.pushes); 639 + this_cpu_inc(qh->stats->pushes); 660 640 return 0; 661 641 } 662 642 EXPORT_SYMBOL_GPL(knav_queue_push); ··· 694 674 if (size) 695 675 *size = ((val & DESC_SIZE_MASK) + 1) * 16; 696 676 697 - atomic_inc(&qh->stats.pops); 677 + this_cpu_inc(qh->stats->pops); 698 678 return dma; 699 679 } 700 680 EXPORT_SYMBOL_GPL(knav_queue_pop);
+17
include/dt-bindings/memory/tegra114-mc.h
··· 23 23 #define TEGRA_SWGROUP_EMUCIF 18 24 24 #define TEGRA_SWGROUP_TSEC 19 25 25 26 + #define TEGRA114_MC_RESET_AVPC 0 27 + #define TEGRA114_MC_RESET_DC 1 28 + #define TEGRA114_MC_RESET_DCB 2 29 + #define TEGRA114_MC_RESET_EPP 3 30 + #define TEGRA114_MC_RESET_2D 4 31 + #define TEGRA114_MC_RESET_HC 5 32 + #define TEGRA114_MC_RESET_HDA 6 33 + #define TEGRA114_MC_RESET_ISP 7 34 + #define TEGRA114_MC_RESET_MPCORE 8 35 + #define TEGRA114_MC_RESET_MPCORELP 9 36 + #define TEGRA114_MC_RESET_MPE 10 37 + #define TEGRA114_MC_RESET_3D 11 38 + #define TEGRA114_MC_RESET_3D2 12 39 + #define TEGRA114_MC_RESET_PPCS 13 40 + #define TEGRA114_MC_RESET_VDE 14 41 + #define TEGRA114_MC_RESET_VI 15 42 + 26 43 #endif
+25
include/dt-bindings/memory/tegra124-mc.h
··· 29 29 #define TEGRA_SWGROUP_VIC 24 30 30 #define TEGRA_SWGROUP_VI 25 31 31 32 + #define TEGRA124_MC_RESET_AFI 0 33 + #define TEGRA124_MC_RESET_AVPC 1 34 + #define TEGRA124_MC_RESET_DC 2 35 + #define TEGRA124_MC_RESET_DCB 3 36 + #define TEGRA124_MC_RESET_HC 4 37 + #define TEGRA124_MC_RESET_HDA 5 38 + #define TEGRA124_MC_RESET_ISP2 6 39 + #define TEGRA124_MC_RESET_MPCORE 7 40 + #define TEGRA124_MC_RESET_MPCORELP 8 41 + #define TEGRA124_MC_RESET_MSENC 9 42 + #define TEGRA124_MC_RESET_PPCS 10 43 + #define TEGRA124_MC_RESET_SATA 11 44 + #define TEGRA124_MC_RESET_VDE 12 45 + #define TEGRA124_MC_RESET_VI 13 46 + #define TEGRA124_MC_RESET_VIC 14 47 + #define TEGRA124_MC_RESET_XUSB_HOST 15 48 + #define TEGRA124_MC_RESET_XUSB_DEV 16 49 + #define TEGRA124_MC_RESET_TSEC 17 50 + #define TEGRA124_MC_RESET_SDMMC1 18 51 + #define TEGRA124_MC_RESET_SDMMC2 19 52 + #define TEGRA124_MC_RESET_SDMMC3 20 53 + #define TEGRA124_MC_RESET_SDMMC4 21 54 + #define TEGRA124_MC_RESET_ISP2B 22 55 + #define TEGRA124_MC_RESET_GPU 23 56 + 32 57 #endif
+21
include/dt-bindings/memory/tegra20-mc.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef DT_BINDINGS_MEMORY_TEGRA20_MC_H 3 + #define DT_BINDINGS_MEMORY_TEGRA20_MC_H 4 + 5 + #define TEGRA20_MC_RESET_AVPC 0 6 + #define TEGRA20_MC_RESET_DC 1 7 + #define TEGRA20_MC_RESET_DCB 2 8 + #define TEGRA20_MC_RESET_EPP 3 9 + #define TEGRA20_MC_RESET_2D 4 10 + #define TEGRA20_MC_RESET_HC 5 11 + #define TEGRA20_MC_RESET_ISP 6 12 + #define TEGRA20_MC_RESET_MPCORE 7 13 + #define TEGRA20_MC_RESET_MPEA 8 14 + #define TEGRA20_MC_RESET_MPEB 9 15 + #define TEGRA20_MC_RESET_MPEC 10 16 + #define TEGRA20_MC_RESET_3D 11 17 + #define TEGRA20_MC_RESET_PPCS 12 18 + #define TEGRA20_MC_RESET_VDE 13 19 + #define TEGRA20_MC_RESET_VI 14 20 + 21 + #endif
+31
include/dt-bindings/memory/tegra210-mc.h
··· 34 34 #define TEGRA_SWGROUP_ETR 29 35 35 #define TEGRA_SWGROUP_TSECB 30 36 36 37 + #define TEGRA210_MC_RESET_AFI 0 38 + #define TEGRA210_MC_RESET_AVPC 1 39 + #define TEGRA210_MC_RESET_DC 2 40 + #define TEGRA210_MC_RESET_DCB 3 41 + #define TEGRA210_MC_RESET_HC 4 42 + #define TEGRA210_MC_RESET_HDA 5 43 + #define TEGRA210_MC_RESET_ISP2 6 44 + #define TEGRA210_MC_RESET_MPCORE 7 45 + #define TEGRA210_MC_RESET_NVENC 8 46 + #define TEGRA210_MC_RESET_PPCS 9 47 + #define TEGRA210_MC_RESET_SATA 10 48 + #define TEGRA210_MC_RESET_VI 11 49 + #define TEGRA210_MC_RESET_VIC 12 50 + #define TEGRA210_MC_RESET_XUSB_HOST 13 51 + #define TEGRA210_MC_RESET_XUSB_DEV 14 52 + #define TEGRA210_MC_RESET_A9AVP 15 53 + #define TEGRA210_MC_RESET_TSEC 16 54 + #define TEGRA210_MC_RESET_SDMMC1 17 55 + #define TEGRA210_MC_RESET_SDMMC2 18 56 + #define TEGRA210_MC_RESET_SDMMC3 19 57 + #define TEGRA210_MC_RESET_SDMMC4 20 58 + #define TEGRA210_MC_RESET_ISP2B 21 59 + #define TEGRA210_MC_RESET_GPU 22 60 + #define TEGRA210_MC_RESET_NVDEC 23 61 + #define TEGRA210_MC_RESET_APE 24 62 + #define TEGRA210_MC_RESET_SE 25 63 + #define TEGRA210_MC_RESET_NVJPG 26 64 + #define TEGRA210_MC_RESET_AXIAP 27 65 + #define TEGRA210_MC_RESET_ETR 28 66 + #define TEGRA210_MC_RESET_TSECB 29 67 + 37 68 #endif
+19
include/dt-bindings/memory/tegra30-mc.h
··· 22 22 #define TEGRA_SWGROUP_MPCORE 17 23 23 #define TEGRA_SWGROUP_ISP 18 24 24 25 + #define TEGRA30_MC_RESET_AFI 0 26 + #define TEGRA30_MC_RESET_AVPC 1 27 + #define TEGRA30_MC_RESET_DC 2 28 + #define TEGRA30_MC_RESET_DCB 3 29 + #define TEGRA30_MC_RESET_EPP 4 30 + #define TEGRA30_MC_RESET_2D 5 31 + #define TEGRA30_MC_RESET_HC 6 32 + #define TEGRA30_MC_RESET_HDA 7 33 + #define TEGRA30_MC_RESET_ISP 8 34 + #define TEGRA30_MC_RESET_MPCORE 9 35 + #define TEGRA30_MC_RESET_MPCORELP 10 36 + #define TEGRA30_MC_RESET_MPE 11 37 + #define TEGRA30_MC_RESET_3D 12 38 + #define TEGRA30_MC_RESET_3D2 13 39 + #define TEGRA30_MC_RESET_PPCS 14 40 + #define TEGRA30_MC_RESET_SATA 15 41 + #define TEGRA30_MC_RESET_VDE 16 42 + #define TEGRA30_MC_RESET_VI 17 43 + 25 44 #endif
+27
include/dt-bindings/power/px30-power.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __DT_BINDINGS_POWER_PX30_POWER_H__ 3 + #define __DT_BINDINGS_POWER_PX30_POWER_H__ 4 + 5 + /* VD_CORE */ 6 + #define PX30_PD_A35_0 0 7 + #define PX30_PD_A35_1 1 8 + #define PX30_PD_A35_2 2 9 + #define PX30_PD_A35_3 3 10 + #define PX30_PD_SCU 4 11 + 12 + /* VD_LOGIC */ 13 + #define PX30_PD_USB 5 14 + #define PX30_PD_DDR 6 15 + #define PX30_PD_SDCARD 7 16 + #define PX30_PD_CRYPTO 8 17 + #define PX30_PD_GMAC 9 18 + #define PX30_PD_MMC_NAND 10 19 + #define PX30_PD_VPU 11 20 + #define PX30_PD_VO 12 21 + #define PX30_PD_VI 13 22 + #define PX30_PD_GPU 14 23 + 24 + /* VD_PMU */ 25 + #define PX30_PD_PMU 15 26 + 27 + #endif
+13
include/dt-bindings/power/rk3036-power.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __DT_BINDINGS_POWER_RK3036_POWER_H__ 3 + #define __DT_BINDINGS_POWER_RK3036_POWER_H__ 4 + 5 + #define RK3036_PD_MSCH 0 6 + #define RK3036_PD_CORE 1 7 + #define RK3036_PD_PERI 2 8 + #define RK3036_PD_VIO 3 9 + #define RK3036_PD_VPU 4 10 + #define RK3036_PD_GPU 5 11 + #define RK3036_PD_SYS 6 12 + 13 + #endif
+14
include/dt-bindings/power/rk3128-power.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __DT_BINDINGS_POWER_RK3128_POWER_H__ 3 + #define __DT_BINDINGS_POWER_RK3128_POWER_H__ 4 + 5 + /* VD_CORE */ 6 + #define RK3128_PD_CORE 0 7 + 8 + /* VD_LOGIC */ 9 + #define RK3128_PD_VIO 1 10 + #define RK3128_PD_VIDEO 2 11 + #define RK3128_PD_GPU 3 12 + #define RK3128_PD_MSCH 4 13 + 14 + #endif
+21
include/dt-bindings/power/rk3228-power.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __DT_BINDINGS_POWER_RK3228_POWER_H__ 3 + #define __DT_BINDINGS_POWER_RK3228_POWER_H__ 4 + 5 + /** 6 + * RK3228 idle id Summary. 7 + */ 8 + 9 + #define RK3228_PD_CORE 0 10 + #define RK3228_PD_MSCH 1 11 + #define RK3228_PD_BUS 2 12 + #define RK3228_PD_SYS 3 13 + #define RK3228_PD_VIO 4 14 + #define RK3228_PD_VOP 5 15 + #define RK3228_PD_VPU 6 16 + #define RK3228_PD_RKVDEC 7 17 + #define RK3228_PD_GPU 8 18 + #define RK3228_PD_PERI 9 19 + #define RK3228_PD_GMAC 10 20 + 21 + #endif
+25
include/linux/platform_data/ti-aemif.h
··· 16 16 17 17 #include <linux/of_platform.h> 18 18 19 + /** 20 + * struct aemif_abus_data - Async bus configuration parameters. 21 + * 22 + * @cs - Chip-select number. 23 + */ 24 + struct aemif_abus_data { 25 + u32 cs; 26 + }; 27 + 28 + /** 29 + * struct aemif_platform_data - Data to set up the TI aemif driver. 30 + * 31 + * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif 32 + * subdevices. 33 + * @cs_offset: Lowest allowed chip-select number. 34 + * @abus_data: Array of async bus configuration entries. 35 + * @num_abus_data: Number of abus entries. 36 + * @sub_devices: Array of platform subdevices. 37 + * @num_sub_devices: Number of subdevices. 38 + */ 19 39 struct aemif_platform_data { 20 40 struct of_dev_auxdata *dev_lookup; 41 + u32 cs_offset; 42 + struct aemif_abus_data *abus_data; 43 + size_t num_abus_data; 44 + struct platform_device *sub_devices; 45 + size_t num_sub_devices; 21 46 }; 22 47 23 48 #endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
+13 -5
include/linux/scmi_protocol.h
··· 85 85 * @level_set: sets the performance level of a domain 86 86 * @level_get: gets the performance level of a domain 87 87 * @device_domain_id: gets the scmi domain id for a given device 88 - * @get_transition_latency: gets the DVFS transition latency for a given device 89 - * @add_opps_to_device: adds all the OPPs for a given device 88 + * @transition_latency_get: gets the DVFS transition latency for a given device 89 + * @device_opps_add: adds all the OPPs for a given device 90 90 * @freq_set: sets the frequency for a given device using sustained frequency 91 91 * to sustained performance level mapping 92 92 * @freq_get: gets the frequency for a given device using sustained frequency ··· 102 102 int (*level_get)(const struct scmi_handle *handle, u32 domain, 103 103 u32 *level, bool poll); 104 104 int (*device_domain_id)(struct device *dev); 105 - int (*get_transition_latency)(const struct scmi_handle *handle, 105 + int (*transition_latency_get)(const struct scmi_handle *handle, 106 106 struct device *dev); 107 - int (*add_opps_to_device)(const struct scmi_handle *handle, 108 - struct device *dev); 107 + int (*device_opps_add)(const struct scmi_handle *handle, 108 + struct device *dev); 109 109 int (*freq_set)(const struct scmi_handle *handle, u32 domain, 110 110 unsigned long rate, bool poll); 111 111 int (*freq_get)(const struct scmi_handle *handle, u32 domain, ··· 189 189 * @perf_ops: pointer to set of performance protocol operations 190 190 * @clk_ops: pointer to set of clock protocol operations 191 191 * @sensor_ops: pointer to set of sensor protocol operations 192 + * @perf_priv: pointer to private data structure specific to performance 193 + * protocol(for internal use only) 194 + * @clk_priv: pointer to private data structure specific to clock 195 + * protocol(for internal use only) 196 + * @power_priv: pointer to private data structure specific to power 197 + * protocol(for internal use only) 198 + * @sensor_priv: pointer to private data structure specific to sensors 199 + * protocol(for internal use only) 192 200 */ 193 201 struct scmi_handle { 194 202 struct device *dev;
+1 -9
include/linux/soc/ti/ti_sci_protocol.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * Texas Instruments System Control Interface Protocol 3 4 * 4 5 * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ 5 6 * Nishanth Menon 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License version 2 as 9 - * published by the Free Software Foundation. 10 - * 11 - * This program is distributed "as is" WITHOUT ANY WARRANTY of any 12 - * kind, whether express or implied; without even the implied warranty 13 - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 7 */ 16 8 17 9 #ifndef __TISCI_PROTOCOL_H
+1 -1
include/soc/tegra/cpuidle.h
··· 14 14 #ifndef __SOC_TEGRA_CPUIDLE_H__ 15 15 #define __SOC_TEGRA_CPUIDLE_H__ 16 16 17 - #if defined(CONFIG_ARM) && defined(CONFIG_CPU_IDLE) 17 + #if defined(CONFIG_ARM) && defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_CPU_IDLE) 18 18 void tegra_cpuidle_pcie_irqs_in_use(void); 19 19 #else 20 20 static inline void tegra_cpuidle_pcie_irqs_in_use(void)
+36 -1
include/soc/tegra/mc.h
··· 9 9 #ifndef __SOC_TEGRA_MC_H__ 10 10 #define __SOC_TEGRA_MC_H__ 11 11 12 + #include <linux/reset-controller.h> 12 13 #include <linux/types.h> 13 14 14 15 struct clk; ··· 96 95 } 97 96 #endif 98 97 98 + struct tegra_mc_reset { 99 + const char *name; 100 + unsigned long id; 101 + unsigned int control; 102 + unsigned int status; 103 + unsigned int reset; 104 + unsigned int bit; 105 + }; 106 + 107 + struct tegra_mc_reset_ops { 108 + int (*hotreset_assert)(struct tegra_mc *mc, 109 + const struct tegra_mc_reset *rst); 110 + int (*hotreset_deassert)(struct tegra_mc *mc, 111 + const struct tegra_mc_reset *rst); 112 + int (*block_dma)(struct tegra_mc *mc, 113 + const struct tegra_mc_reset *rst); 114 + bool (*dma_idling)(struct tegra_mc *mc, 115 + const struct tegra_mc_reset *rst); 116 + int (*unblock_dma)(struct tegra_mc *mc, 117 + const struct tegra_mc_reset *rst); 118 + int (*reset_status)(struct tegra_mc *mc, 119 + const struct tegra_mc_reset *rst); 120 + }; 121 + 99 122 struct tegra_mc_soc { 100 123 const struct tegra_mc_client *clients; 101 124 unsigned int num_clients; ··· 133 108 u8 client_id_mask; 134 109 135 110 const struct tegra_smmu_soc *smmu; 111 + 112 + u32 intmask; 113 + 114 + const struct tegra_mc_reset_ops *reset_ops; 115 + const struct tegra_mc_reset *resets; 116 + unsigned int num_resets; 136 117 }; 137 118 138 119 struct tegra_mc { 139 120 struct device *dev; 140 121 struct tegra_smmu *smmu; 141 - void __iomem *regs; 122 + void __iomem *regs, *regs2; 142 123 struct clk *clk; 143 124 int irq; 144 125 ··· 153 122 154 123 struct tegra_mc_timing *timings; 155 124 unsigned int num_timings; 125 + 126 + struct reset_controller_dev reset; 127 + 128 + spinlock_t lock; 156 129 }; 157 130 158 131 void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);