Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

accel/ivpu: Split IP and buttress code

The NPU device consists of two parts: NPU buttress and NPU IP.
Buttress is a platform-specific part that integrates the NPU IP with
the CPU.
NPU IP is the platform-agnostic part that does the inference.

This separation enables support for multiple platforms using
a single NPU IP, so for example NPU IP 37XX could be integrated into
MTL and LNL platforms.

Signed-off-by: Wachowski, Karol <karol.wachowski@intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240515113006.457472-3-jacek.lawrynowicz@linux.intel.com

authored by

Wachowski, Karol and committed by
Jacek Lawrynowicz
8a27ad81 302d5832

+2588 -2502
+3 -2
drivers/accel/ivpu/Makefile
··· 6 6 ivpu_fw.o \ 7 7 ivpu_fw_log.o \ 8 8 ivpu_gem.o \ 9 - ivpu_hw_37xx.o \ 10 - ivpu_hw_40xx.o \ 9 + ivpu_hw.o \ 10 + ivpu_hw_btrs.o \ 11 + ivpu_hw_ip.o \ 11 12 ivpu_ipc.o \ 12 13 ivpu_job.o \ 13 14 ivpu_jsm_msg.o \
+1 -1
drivers/accel/ivpu/ivpu_debugfs.c
··· 409 409 debugfs_create_file("resume_engine", 0200, debugfs_root, vdev, 410 410 &ivpu_resume_engine_fops); 411 411 412 - if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) 412 + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) 413 413 debugfs_create_file("fw_profiling_freq_drive", 0200, 414 414 debugfs_root, vdev, &fw_profiling_freq_fops); 415 415 }
+6 -7
drivers/accel/ivpu/ivpu_drv.c
··· 464 464 return ret; 465 465 } 466 466 467 + ivpu_irq_handlers_init(vdev); 468 + 467 469 vdev->irq = pci_irq_vector(pdev, 0); 468 470 469 - ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler, 471 + ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler, 470 472 ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev); 471 473 if (ret) 472 474 ivpu_err(vdev, "Failed to request an IRQ %d\n", ret); ··· 545 543 if (!vdev->pm) 546 544 return -ENOMEM; 547 545 548 - if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) { 549 - vdev->hw->ops = &ivpu_hw_40xx_ops; 546 + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) 550 547 vdev->hw->dma_bits = 48; 551 - } else { 552 - vdev->hw->ops = &ivpu_hw_37xx_ops; 548 + else 553 549 vdev->hw->dma_bits = 38; 554 - } 555 550 556 551 vdev->platform = IVPU_PLATFORM_INVALID; 557 552 vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID; ··· 577 578 goto err_xa_destroy; 578 579 579 580 /* Init basic HW info based on buttress registers which are accessible before power up */ 580 - ret = ivpu_hw_info_init(vdev); 581 + ret = ivpu_hw_init(vdev); 581 582 if (ret) 582 583 goto err_xa_destroy; 583 584
+27 -6
drivers/accel/ivpu/ivpu_drv.h
··· 27 27 #define PCI_DEVICE_ID_ARL 0xad1d 28 28 #define PCI_DEVICE_ID_LNL 0x643e 29 29 30 - #define IVPU_HW_37XX 37 31 - #define IVPU_HW_40XX 40 30 + #define IVPU_HW_IP_37XX 37 31 + #define IVPU_HW_IP_40XX 40 32 + #define IVPU_HW_IP_50XX 50 33 + #define IVPU_HW_IP_60XX 60 34 + 35 + #define IVPU_HW_BTRS_MTL 1 36 + #define IVPU_HW_BTRS_LNL 2 32 37 33 38 #define IVPU_GLOBAL_CONTEXT_MMU_SSID 0 34 39 /* SSID 1 is used by the VPU to represent reserved context */ ··· 203 198 return to_pci_dev(vdev->drm.dev)->device; 204 199 } 205 200 206 - static inline int ivpu_hw_gen(struct ivpu_device *vdev) 201 + static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev) 207 202 { 208 203 switch (ivpu_device_id(vdev)) { 209 204 case PCI_DEVICE_ID_MTL: 210 205 case PCI_DEVICE_ID_ARL: 211 - return IVPU_HW_37XX; 206 + return IVPU_HW_IP_37XX; 212 207 case PCI_DEVICE_ID_LNL: 213 - return IVPU_HW_40XX; 208 + return IVPU_HW_IP_40XX; 214 209 default: 215 - ivpu_err(vdev, "Unknown NPU device\n"); 210 + dump_stack(); 211 + ivpu_err(vdev, "Unknown NPU IP generation\n"); 212 + return 0; 213 + } 214 + } 215 + 216 + static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev) 217 + { 218 + switch (ivpu_device_id(vdev)) { 219 + case PCI_DEVICE_ID_MTL: 220 + case PCI_DEVICE_ID_ARL: 221 + return IVPU_HW_BTRS_MTL; 222 + case PCI_DEVICE_ID_LNL: 223 + return IVPU_HW_BTRS_LNL; 224 + default: 225 + dump_stack(); 226 + ivpu_err(vdev, "Unknown buttress generation\n"); 216 227 return 0; 217 228 } 218 229 }
+10 -10
drivers/accel/ivpu/ivpu_fw.c
··· 54 54 int gen; 55 55 const char *name; 56 56 } fw_names[] = { 57 - { IVPU_HW_37XX, "vpu_37xx.bin" }, 58 - { IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" }, 59 - { IVPU_HW_40XX, "vpu_40xx.bin" }, 60 - { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, 57 + { IVPU_HW_IP_37XX, "vpu_37xx.bin" }, 58 + { IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" }, 59 + { IVPU_HW_IP_40XX, "vpu_40xx.bin" }, 60 + { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, 61 61 }; 62 62 63 63 static int ivpu_fw_request(struct ivpu_device *vdev) ··· 73 73 } 74 74 75 75 for (i = 0; i < ARRAY_SIZE(fw_names); i++) { 76 - if (fw_names[i].gen != ivpu_hw_gen(vdev)) 76 + if (fw_names[i].gen != ivpu_hw_ip_gen(vdev)) 77 77 continue; 78 78 79 79 ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev); ··· 246 246 return -EINVAL; 247 247 } 248 248 249 - ivpu_hw_init_range(&vdev->hw->ranges.global, start, size); 249 + ivpu_hw_range_init(&vdev->hw->ranges.global, start, size); 250 250 return 0; 251 251 } 252 252 ··· 511 511 512 512 boot_params->magic = VPU_BOOT_PARAMS_MAGIC; 513 513 boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number; 514 - boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev); 514 + boot_params->frequency = ivpu_hw_pll_freq_get(vdev); 515 515 516 516 /* 517 517 * This param is a debug firmware feature. 
It switches default clock ··· 568 568 boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr; 569 569 boot_params->verbose_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_verb); 570 570 571 - boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev); 572 - boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev); 573 - boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev); 571 + boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev); 572 + boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev); 573 + boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev); 574 574 boot_params->vpu_scheduling_mode = vdev->hw->sched_mode; 575 575 if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) 576 576 boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
+310
drivers/accel/ivpu/ivpu_hw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2020 - 2024 Intel Corporation 4 + */ 5 + 6 + #include "ivpu_drv.h" 7 + #include "ivpu_hw.h" 8 + #include "ivpu_hw_btrs.h" 9 + #include "ivpu_hw_ip.h" 10 + 11 + #include <linux/dmi.h> 12 + 13 + static char *platform_to_str(u32 platform) 14 + { 15 + switch (platform) { 16 + case IVPU_PLATFORM_SILICON: 17 + return "SILICON"; 18 + case IVPU_PLATFORM_SIMICS: 19 + return "SIMICS"; 20 + case IVPU_PLATFORM_FPGA: 21 + return "FPGA"; 22 + default: 23 + return "Invalid platform"; 24 + } 25 + } 26 + 27 + static const struct dmi_system_id dmi_platform_simulation[] = { 28 + { 29 + .ident = "Intel Simics", 30 + .matches = { 31 + DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"), 32 + DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 33 + DMI_MATCH(DMI_BOARD_SERIAL, "123456789"), 34 + }, 35 + }, 36 + { 37 + .ident = "Intel Simics", 38 + .matches = { 39 + DMI_MATCH(DMI_BOARD_NAME, "Simics"), 40 + }, 41 + }, 42 + { } 43 + }; 44 + 45 + static void platform_init(struct ivpu_device *vdev) 46 + { 47 + if (dmi_check_system(dmi_platform_simulation)) 48 + vdev->platform = IVPU_PLATFORM_SIMICS; 49 + else 50 + vdev->platform = IVPU_PLATFORM_SILICON; 51 + 52 + ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", 53 + platform_to_str(vdev->platform), vdev->platform); 54 + } 55 + 56 + static void wa_init(struct ivpu_device *vdev) 57 + { 58 + vdev->wa.punit_disabled = ivpu_is_fpga(vdev); 59 + vdev->wa.clear_runtime_mem = false; 60 + 61 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 62 + vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev); 63 + 64 + if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL) 65 + vdev->wa.disable_clock_relinquish = true; 66 + 67 + IVPU_PRINT_WA(punit_disabled); 68 + IVPU_PRINT_WA(clear_runtime_mem); 69 + IVPU_PRINT_WA(interrupt_clear_with_0); 70 + IVPU_PRINT_WA(disable_clock_relinquish); 71 + } 72 + 73 + static void timeouts_init(struct ivpu_device *vdev) 74 + { 75 + if (ivpu_is_fpga(vdev)) { 76 + 
vdev->timeout.boot = 100000; 77 + vdev->timeout.jsm = 50000; 78 + vdev->timeout.tdr = 2000000; 79 + vdev->timeout.reschedule_suspend = 1000; 80 + vdev->timeout.autosuspend = -1; 81 + vdev->timeout.d0i3_entry_msg = 500; 82 + } else if (ivpu_is_simics(vdev)) { 83 + vdev->timeout.boot = 50; 84 + vdev->timeout.jsm = 500; 85 + vdev->timeout.tdr = 10000; 86 + vdev->timeout.reschedule_suspend = 10; 87 + vdev->timeout.autosuspend = -1; 88 + vdev->timeout.d0i3_entry_msg = 100; 89 + } else { 90 + vdev->timeout.boot = 1000; 91 + vdev->timeout.jsm = 500; 92 + vdev->timeout.tdr = 2000; 93 + vdev->timeout.reschedule_suspend = 10; 94 + vdev->timeout.autosuspend = 10; 95 + vdev->timeout.d0i3_entry_msg = 5; 96 + } 97 + } 98 + 99 + static void memory_ranges_init(struct ivpu_device *vdev) 100 + { 101 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { 102 + ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); 103 + ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M); 104 + ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G); 105 + ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G); 106 + } else { 107 + ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); 108 + ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M); 109 + ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M); 110 + ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G); 111 + } 112 + } 113 + 114 + static int wp_enable(struct ivpu_device *vdev) 115 + { 116 + return ivpu_hw_btrs_wp_drive(vdev, true); 117 + } 118 + 119 + static int wp_disable(struct ivpu_device *vdev) 120 + { 121 + return ivpu_hw_btrs_wp_drive(vdev, false); 122 + } 123 + 124 + int ivpu_hw_power_up(struct ivpu_device *vdev) 125 + { 126 + int ret; 127 + 128 + ret = ivpu_hw_btrs_d0i3_disable(vdev); 129 + if (ret) 130 + ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); 131 + 132 + ret = wp_enable(vdev); 133 + if (ret) { 134 + ivpu_err(vdev, 
"Failed to enable workpoint: %d\n", ret); 135 + return ret; 136 + } 137 + 138 + if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) { 139 + if (IVPU_WA(disable_clock_relinquish)) 140 + ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev); 141 + ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev); 142 + ivpu_hw_btrs_ats_print_lnl(vdev); 143 + } 144 + 145 + ret = ivpu_hw_ip_host_ss_configure(vdev); 146 + if (ret) { 147 + ivpu_err(vdev, "Failed to configure host SS: %d\n", ret); 148 + return ret; 149 + } 150 + 151 + ivpu_hw_ip_idle_gen_disable(vdev); 152 + 153 + ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev); 154 + if (ret) { 155 + ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n"); 156 + return ret; 157 + } 158 + 159 + ret = ivpu_hw_ip_pwr_domain_enable(vdev); 160 + if (ret) { 161 + ivpu_err(vdev, "Failed to enable power domain: %d\n", ret); 162 + return ret; 163 + } 164 + 165 + ret = ivpu_hw_ip_host_ss_axi_enable(vdev); 166 + if (ret) { 167 + ivpu_err(vdev, "Failed to enable AXI: %d\n", ret); 168 + return ret; 169 + } 170 + 171 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL) 172 + ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev); 173 + 174 + ret = ivpu_hw_ip_top_noc_enable(vdev); 175 + if (ret) 176 + ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret); 177 + 178 + return ret; 179 + } 180 + 181 + static void save_d0i3_entry_timestamp(struct ivpu_device *vdev) 182 + { 183 + vdev->hw->d0i3_entry_host_ts = ktime_get_boottime(); 184 + vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev); 185 + } 186 + 187 + int ivpu_hw_reset(struct ivpu_device *vdev) 188 + { 189 + int ret = 0; 190 + 191 + if (ivpu_hw_btrs_ip_reset(vdev)) { 192 + ivpu_err(vdev, "Failed to reset NPU IP\n"); 193 + ret = -EIO; 194 + } 195 + 196 + if (wp_disable(vdev)) { 197 + ivpu_err(vdev, "Failed to disable workpoint\n"); 198 + ret = -EIO; 199 + } 200 + 201 + return ret; 202 + } 203 + 204 + int ivpu_hw_power_down(struct ivpu_device *vdev) 205 + { 206 + int ret = 0; 207 + 208 + 
save_d0i3_entry_timestamp(vdev); 209 + 210 + if (!ivpu_hw_is_idle(vdev)) 211 + ivpu_warn(vdev, "NPU not idle during power down\n"); 212 + 213 + if (ivpu_hw_reset(vdev)) { 214 + ivpu_err(vdev, "Failed to reset NPU\n"); 215 + ret = -EIO; 216 + } 217 + 218 + if (ivpu_hw_btrs_d0i3_enable(vdev)) { 219 + ivpu_err(vdev, "Failed to enter D0I3\n"); 220 + ret = -EIO; 221 + } 222 + 223 + return ret; 224 + } 225 + 226 + int ivpu_hw_init(struct ivpu_device *vdev) 227 + { 228 + ivpu_hw_btrs_info_init(vdev); 229 + ivpu_hw_btrs_freq_ratios_init(vdev); 230 + memory_ranges_init(vdev); 231 + platform_init(vdev); 232 + wa_init(vdev); 233 + timeouts_init(vdev); 234 + 235 + return 0; 236 + } 237 + 238 + int ivpu_hw_boot_fw(struct ivpu_device *vdev) 239 + { 240 + int ret; 241 + 242 + ivpu_hw_ip_snoop_disable(vdev); 243 + ivpu_hw_ip_tbu_mmu_enable(vdev); 244 + ret = ivpu_hw_ip_soc_cpu_boot(vdev); 245 + if (ret) 246 + ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret); 247 + 248 + return ret; 249 + } 250 + 251 + void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable) 252 + { 253 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { 254 + vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 255 + return; 256 + } 257 + 258 + if (enable) 259 + vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH; 260 + else 261 + vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 262 + } 263 + 264 + void ivpu_irq_handlers_init(struct ivpu_device *vdev) 265 + { 266 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 267 + vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx; 268 + else 269 + vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx; 270 + 271 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 272 + vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl; 273 + else 274 + vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl; 275 + } 276 + 277 + void ivpu_hw_irq_enable(struct ivpu_device *vdev) 278 + { 279 + ivpu_hw_ip_irq_enable(vdev); 280 + 
ivpu_hw_btrs_irq_enable(vdev); 281 + } 282 + 283 + void ivpu_hw_irq_disable(struct ivpu_device *vdev) 284 + { 285 + ivpu_hw_btrs_irq_disable(vdev); 286 + ivpu_hw_ip_irq_disable(vdev); 287 + } 288 + 289 + irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr) 290 + { 291 + bool ip_handled, btrs_handled, wake_thread = false; 292 + struct ivpu_device *vdev = ptr; 293 + 294 + ivpu_hw_btrs_global_int_disable(vdev); 295 + 296 + btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq); 297 + if (!ivpu_hw_is_idle((vdev)) || !btrs_handled) 298 + ip_handled = ivpu_hw_ip_irq_handler(vdev, irq, &wake_thread); 299 + else 300 + ip_handled = false; 301 + 302 + /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ 303 + ivpu_hw_btrs_global_int_enable(vdev); 304 + 305 + if (wake_thread) 306 + return IRQ_WAKE_THREAD; 307 + if (ip_handled || btrs_handled) 308 + return IRQ_HANDLED; 309 + return IRQ_NONE; 310 + }
+90 -145
drivers/accel/ivpu/ivpu_hw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * Copyright (C) 2020-2024 Intel Corporation 3 + * Copyright (C) 2020 - 2024 Intel Corporation 4 4 */ 5 5 6 6 #ifndef __IVPU_HW_H__ 7 7 #define __IVPU_HW_H__ 8 8 9 9 #include "ivpu_drv.h" 10 - 11 - struct ivpu_hw_ops { 12 - int (*info_init)(struct ivpu_device *vdev); 13 - int (*power_up)(struct ivpu_device *vdev); 14 - int (*boot_fw)(struct ivpu_device *vdev); 15 - int (*power_down)(struct ivpu_device *vdev); 16 - int (*reset)(struct ivpu_device *vdev); 17 - bool (*is_idle)(struct ivpu_device *vdev); 18 - int (*wait_for_idle)(struct ivpu_device *vdev); 19 - void (*wdt_disable)(struct ivpu_device *vdev); 20 - void (*diagnose_failure)(struct ivpu_device *vdev); 21 - u32 (*profiling_freq_get)(struct ivpu_device *vdev); 22 - void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable); 23 - u32 (*reg_pll_freq_get)(struct ivpu_device *vdev); 24 - u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio); 25 - u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev); 26 - u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev); 27 - u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev); 28 - void (*reg_db_set)(struct ivpu_device *vdev, u32 db_id); 29 - u32 (*reg_ipc_rx_addr_get)(struct ivpu_device *vdev); 30 - u32 (*reg_ipc_rx_count_get)(struct ivpu_device *vdev); 31 - void (*reg_ipc_tx_set)(struct ivpu_device *vdev, u32 vpu_addr); 32 - void (*irq_clear)(struct ivpu_device *vdev); 33 - void (*irq_enable)(struct ivpu_device *vdev); 34 - void (*irq_disable)(struct ivpu_device *vdev); 35 - irqreturn_t (*irq_handler)(int irq, void *ptr); 36 - }; 10 + #include "ivpu_hw_btrs.h" 11 + #include "ivpu_hw_ip.h" 37 12 38 13 struct ivpu_addr_range { 39 14 resource_size_t start; ··· 16 41 }; 17 42 18 43 struct ivpu_hw_info { 19 - const struct ivpu_hw_ops *ops; 44 + struct { 45 + bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq); 46 + bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq, bool 
*wake_thread); 47 + } irq; 20 48 struct { 21 49 struct ivpu_addr_range global; 22 50 struct ivpu_addr_range user; ··· 45 67 u64 d0i3_entry_vpu_ts; 46 68 }; 47 69 48 - extern const struct ivpu_hw_ops ivpu_hw_37xx_ops; 49 - extern const struct ivpu_hw_ops ivpu_hw_40xx_ops; 70 + int ivpu_hw_init(struct ivpu_device *vdev); 71 + int ivpu_hw_power_up(struct ivpu_device *vdev); 72 + int ivpu_hw_power_down(struct ivpu_device *vdev); 73 + int ivpu_hw_reset(struct ivpu_device *vdev); 74 + int ivpu_hw_boot_fw(struct ivpu_device *vdev); 75 + void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable); 76 + void ivpu_irq_handlers_init(struct ivpu_device *vdev); 77 + void ivpu_hw_irq_enable(struct ivpu_device *vdev); 78 + void ivpu_hw_irq_disable(struct ivpu_device *vdev); 79 + irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr); 50 80 51 - static inline int ivpu_hw_info_init(struct ivpu_device *vdev) 81 + static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq) 52 82 { 53 - return vdev->hw->ops->info_init(vdev); 54 - }; 55 - 56 - static inline int ivpu_hw_power_up(struct ivpu_device *vdev) 57 - { 58 - ivpu_dbg(vdev, PM, "HW power up\n"); 59 - 60 - return vdev->hw->ops->power_up(vdev); 61 - }; 62 - 63 - static inline int ivpu_hw_boot_fw(struct ivpu_device *vdev) 64 - { 65 - return vdev->hw->ops->boot_fw(vdev); 66 - }; 67 - 68 - static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev) 69 - { 70 - return vdev->hw->ops->is_idle(vdev); 71 - }; 72 - 73 - static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev) 74 - { 75 - return vdev->hw->ops->wait_for_idle(vdev); 76 - }; 77 - 78 - static inline int ivpu_hw_power_down(struct ivpu_device *vdev) 79 - { 80 - ivpu_dbg(vdev, PM, "HW power down\n"); 81 - 82 - return vdev->hw->ops->power_down(vdev); 83 - }; 84 - 85 - static inline int ivpu_hw_reset(struct ivpu_device *vdev) 86 - { 87 - ivpu_dbg(vdev, PM, "HW reset\n"); 88 - 89 - return vdev->hw->ops->reset(vdev); 90 - }; 91 - 92 - static inline 
void ivpu_hw_wdt_disable(struct ivpu_device *vdev) 93 - { 94 - vdev->hw->ops->wdt_disable(vdev); 95 - }; 96 - 97 - static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev) 98 - { 99 - return vdev->hw->ops->profiling_freq_get(vdev); 100 - }; 101 - 102 - static inline void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable) 103 - { 104 - return vdev->hw->ops->profiling_freq_drive(vdev, enable); 105 - }; 106 - 107 - /* Register indirect accesses */ 108 - static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev) 109 - { 110 - return vdev->hw->ops->reg_pll_freq_get(vdev); 111 - }; 112 - 113 - static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) 114 - { 115 - return vdev->hw->ops->ratio_to_freq(vdev, ratio); 83 + return vdev->hw->irq.btrs_irq_handler(vdev, irq); 116 84 } 117 85 118 - static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev) 86 + static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq, bool *wake_thread) 119 87 { 120 - return vdev->hw->ops->reg_telemetry_offset_get(vdev); 121 - }; 88 + return vdev->hw->irq.ip_irq_handler(vdev, irq, wake_thread); 89 + } 122 90 123 - static inline u32 ivpu_hw_reg_telemetry_size_get(struct ivpu_device *vdev) 124 - { 125 - return vdev->hw->ops->reg_telemetry_size_get(vdev); 126 - }; 127 - 128 - static inline u32 ivpu_hw_reg_telemetry_enable_get(struct ivpu_device *vdev) 129 - { 130 - return vdev->hw->ops->reg_telemetry_enable_get(vdev); 131 - }; 132 - 133 - static inline void ivpu_hw_reg_db_set(struct ivpu_device *vdev, u32 db_id) 134 - { 135 - vdev->hw->ops->reg_db_set(vdev, db_id); 136 - }; 137 - 138 - static inline u32 ivpu_hw_reg_ipc_rx_addr_get(struct ivpu_device *vdev) 139 - { 140 - return vdev->hw->ops->reg_ipc_rx_addr_get(vdev); 141 - }; 142 - 143 - static inline u32 ivpu_hw_reg_ipc_rx_count_get(struct ivpu_device *vdev) 144 - { 145 - return vdev->hw->ops->reg_ipc_rx_count_get(vdev); 146 - }; 147 - 148 - static 
inline void ivpu_hw_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) 149 - { 150 - vdev->hw->ops->reg_ipc_tx_set(vdev, vpu_addr); 151 - }; 152 - 153 - static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev) 154 - { 155 - vdev->hw->ops->irq_clear(vdev); 156 - }; 157 - 158 - static inline void ivpu_hw_irq_enable(struct ivpu_device *vdev) 159 - { 160 - vdev->hw->ops->irq_enable(vdev); 161 - }; 162 - 163 - static inline void ivpu_hw_irq_disable(struct ivpu_device *vdev) 164 - { 165 - vdev->hw->ops->irq_disable(vdev); 166 - }; 167 - 168 - static inline void ivpu_hw_init_range(struct ivpu_addr_range *range, u64 start, u64 size) 91 + static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size) 169 92 { 170 93 range->start = start; 171 94 range->end = start + size; ··· 77 198 return range->end - range->start; 78 199 } 79 200 201 + static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) 202 + { 203 + return ivpu_hw_btrs_ratio_to_freq(vdev, ratio); 204 + } 205 + 206 + static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev) 207 + { 208 + ivpu_hw_ip_irq_clear(vdev); 209 + } 210 + 211 + static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev) 212 + { 213 + return ivpu_hw_btrs_pll_freq_get(vdev); 214 + } 215 + 216 + static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev) 217 + { 218 + return vdev->hw->pll.profiling_freq; 219 + } 220 + 80 221 static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev) 81 222 { 82 - vdev->hw->ops->diagnose_failure(vdev); 223 + ivpu_hw_ip_diagnose_failure(vdev); 224 + ivpu_hw_btrs_diagnose_failure(vdev); 225 + } 226 + 227 + static inline u32 ivpu_hw_telemetry_offset_get(struct ivpu_device *vdev) 228 + { 229 + return ivpu_hw_btrs_telemetry_offset_get(vdev); 230 + } 231 + 232 + static inline u32 ivpu_hw_telemetry_size_get(struct ivpu_device *vdev) 233 + { 234 + return ivpu_hw_btrs_telemetry_size_get(vdev); 235 + } 236 + 237 + static inline u32 
ivpu_hw_telemetry_enable_get(struct ivpu_device *vdev) 238 + { 239 + return ivpu_hw_btrs_telemetry_enable_get(vdev); 240 + } 241 + 242 + static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev) 243 + { 244 + return ivpu_hw_btrs_is_idle(vdev); 245 + } 246 + 247 + static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev) 248 + { 249 + return ivpu_hw_btrs_wait_for_idle(vdev); 250 + } 251 + 252 + static inline void ivpu_hw_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) 253 + { 254 + ivpu_hw_ip_ipc_tx_set(vdev, vpu_addr); 255 + } 256 + 257 + static inline void ivpu_hw_db_set(struct ivpu_device *vdev, u32 db_id) 258 + { 259 + ivpu_hw_ip_db_set(vdev, db_id); 260 + } 261 + 262 + static inline u32 ivpu_hw_ipc_rx_addr_get(struct ivpu_device *vdev) 263 + { 264 + return ivpu_hw_ip_ipc_rx_addr_get(vdev); 265 + } 266 + 267 + static inline u32 ivpu_hw_ipc_rx_count_get(struct ivpu_device *vdev) 268 + { 269 + return ivpu_hw_ip_ipc_rx_count_get(vdev); 83 270 } 84 271 85 272 #endif /* __IVPU_HW_H__ */
-1071
drivers/accel/ivpu/ivpu_hw_37xx.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (C) 2020-2024 Intel Corporation 4 - */ 5 - 6 - #include "ivpu_drv.h" 7 - #include "ivpu_fw.h" 8 - #include "ivpu_hw_btrs_mtl_reg.h" 9 - #include "ivpu_hw_37xx_reg.h" 10 - #include "ivpu_hw_reg_io.h" 11 - #include "ivpu_hw.h" 12 - #include "ivpu_ipc.h" 13 - #include "ivpu_mmu.h" 14 - #include "ivpu_pm.h" 15 - 16 - #define TILE_FUSE_ENABLE_BOTH 0x0 17 - #define TILE_SKU_BOTH 0x3630 18 - 19 - /* Work point configuration values */ 20 - #define CONFIG_1_TILE 0x01 21 - #define CONFIG_2_TILE 0x02 22 - #define PLL_RATIO_5_3 0x01 23 - #define PLL_RATIO_4_3 0x02 24 - #define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio)) 25 - #define WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_5_3) 26 - #define WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(CONFIG_1_TILE, PLL_RATIO_4_3) 27 - #define WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_5_3) 28 - #define WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(CONFIG_2_TILE, PLL_RATIO_4_3) 29 - #define WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0) 30 - 31 - #define PLL_REF_CLK_FREQ (50 * 1000000) 32 - #define PLL_SIMULATION_FREQ (10 * 1000000) 33 - #define PLL_PROF_CLK_FREQ (38400 * 1000) 34 - #define PLL_DEFAULT_EPP_VALUE 0x80 35 - 36 - #define TIM_SAFE_ENABLE 0xf1d0dead 37 - #define TIM_WATCHDOG_RESET_VALUE 0xffffffff 38 - 39 - #define TIMEOUT_US (150 * USEC_PER_MSEC) 40 - #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) 41 - #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) 42 - #define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) 43 - 44 - #define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ 45 - (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ 46 - (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ 47 - (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ 48 - (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ 49 - (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ 50 - 
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) 51 - 52 - #define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ 53 - (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ 54 - (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) 55 - 56 - #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) 57 - 58 - #define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \ 59 - (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR))) 60 - 61 - #define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \ 62 - (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE))) 63 - 64 - #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) 65 - #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) 66 - 67 - #define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ 68 - (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ 69 - (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ 70 - (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ 71 - (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ 72 - (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ 73 - (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) 74 - 75 - static void ivpu_hw_wa_init(struct ivpu_device *vdev) 76 - { 77 - vdev->wa.punit_disabled = false; 78 - vdev->wa.clear_runtime_mem = false; 79 - 80 - REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK); 81 - if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) { 82 - /* Writing 1s does not clear the interrupt status register */ 83 - vdev->wa.interrupt_clear_with_0 = true; 84 - REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0); 85 - } 86 - 87 - IVPU_PRINT_WA(punit_disabled); 88 - IVPU_PRINT_WA(clear_runtime_mem); 89 - IVPU_PRINT_WA(interrupt_clear_with_0); 90 - } 91 - 92 - static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) 93 - { 94 - vdev->timeout.boot = 1000; 95 - vdev->timeout.jsm = 
500; 96 - vdev->timeout.tdr = 2000; 97 - vdev->timeout.reschedule_suspend = 10; 98 - vdev->timeout.autosuspend = 10; 99 - vdev->timeout.d0i3_entry_msg = 5; 100 - } 101 - 102 - static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev) 103 - { 104 - return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 105 - } 106 - 107 - /* Send KMD initiated workpoint change */ 108 - static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio, 109 - u16 target_ratio, u16 config) 110 - { 111 - int ret; 112 - u32 val; 113 - 114 - ret = ivpu_pll_wait_for_cmd_send(vdev); 115 - if (ret) { 116 - ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret); 117 - return ret; 118 - } 119 - 120 - val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0); 121 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val); 122 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val); 123 - REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val); 124 - 125 - val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1); 126 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val); 127 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val); 128 - REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val); 129 - 130 - val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2); 131 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, config, val); 132 - REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val); 133 - 134 - val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD); 135 - val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val); 136 - REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val); 137 - 138 - ret = ivpu_pll_wait_for_cmd_send(vdev); 139 - if (ret) 140 - ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret); 141 - 142 - return ret; 143 - } 144 - 145 - static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable) 146 - { 147 - u32 exp_val = enable ? 
0x1 : 0x0; 148 - 149 - if (IVPU_WA(punit_disabled)) 150 - return 0; 151 - 152 - return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); 153 - } 154 - 155 - static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev) 156 - { 157 - if (IVPU_WA(punit_disabled)) 158 - return 0; 159 - 160 - return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, 1, PLL_TIMEOUT_US); 161 - } 162 - 163 - static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) 164 - { 165 - struct ivpu_hw_info *hw = vdev->hw; 166 - u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio; 167 - u32 fmin_fuse, fmax_fuse; 168 - 169 - fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE); 170 - fuse_min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse); 171 - fuse_pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse); 172 - 173 - fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE); 174 - fuse_max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse); 175 - 176 - hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio); 177 - hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio); 178 - hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 179 - } 180 - 181 - static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev) 182 - { 183 - return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100); 184 - } 185 - 186 - static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) 187 - { 188 - struct ivpu_hw_info *hw = vdev->hw; 189 - u16 target_ratio; 190 - u16 config; 191 - int ret; 192 - 193 - if (IVPU_WA(punit_disabled)) { 194 - ivpu_dbg(vdev, PM, "Skipping PLL request\n"); 195 - return 0; 196 - } 197 - 198 - if (enable) { 199 - target_ratio = hw->pll.pn_ratio; 200 - config = hw->config; 201 - } else { 202 - target_ratio = 0; 203 - config = 0; 204 - } 205 - 206 - ivpu_dbg(vdev, PM, "PLL workpoint request: config 0x%04x pll ratio 
0x%x\n", 207 - config, target_ratio); 208 - 209 - ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config); 210 - if (ret) { 211 - ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret); 212 - return ret; 213 - } 214 - 215 - ret = ivpu_pll_wait_for_lock(vdev, enable); 216 - if (ret) { 217 - ivpu_err(vdev, "Timed out waiting for PLL lock\n"); 218 - return ret; 219 - } 220 - 221 - if (enable) { 222 - ret = ivpu_pll_wait_for_status_ready(vdev); 223 - if (ret) { 224 - ivpu_err(vdev, "Timed out waiting for PLL ready status\n"); 225 - return ret; 226 - } 227 - 228 - ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev); 229 - if (ret) { 230 - ivpu_err(vdev, "Timed out waiting for NPU IP bar\n"); 231 - return ret; 232 - } 233 - } 234 - 235 - return 0; 236 - } 237 - 238 - static int ivpu_pll_enable(struct ivpu_device *vdev) 239 - { 240 - return ivpu_pll_drive(vdev, true); 241 - } 242 - 243 - static int ivpu_pll_disable(struct ivpu_device *vdev) 244 - { 245 - return ivpu_pll_drive(vdev, false); 246 - } 247 - 248 - static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev) 249 - { 250 - u32 val = 0; 251 - 252 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val); 253 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val); 254 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val); 255 - 256 - REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val); 257 - } 258 - 259 - static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable) 260 - { 261 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET); 262 - 263 - if (enable) { 264 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val); 265 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val); 266 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val); 267 - } else { 268 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val); 269 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val); 270 - val = 
REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val); 271 - } 272 - 273 - REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val); 274 - } 275 - 276 - static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable) 277 - { 278 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET); 279 - 280 - if (enable) { 281 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val); 282 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val); 283 - val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val); 284 - } else { 285 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val); 286 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val); 287 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val); 288 - } 289 - 290 - REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val); 291 - } 292 - 293 - static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) 294 - { 295 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN); 296 - 297 - if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) 298 - return -EIO; 299 - 300 - return 0; 301 - } 302 - 303 - static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 304 - { 305 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN); 306 - 307 - if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) 308 - return -EIO; 309 - 310 - return 0; 311 - } 312 - 313 - static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 314 - { 315 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY); 316 - 317 - if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val)) 318 - return -EIO; 319 - 320 - return 0; 321 - } 322 - 323 - static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val) 324 - { 325 - u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN); 326 - 327 - if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || 328 - !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, 
exp_val, val)) 329 - return -EIO; 330 - 331 - return 0; 332 - } 333 - 334 - static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 335 - { 336 - u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN); 337 - 338 - if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || 339 - !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) 340 - return -EIO; 341 - 342 - return 0; 343 - } 344 - 345 - static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 346 - { 347 - u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY); 348 - 349 - if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || 350 - !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) 351 - return -EIO; 352 - 353 - return 0; 354 - } 355 - 356 - static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev) 357 - { 358 - ivpu_boot_host_ss_rst_clr_assert(vdev); 359 - 360 - return ivpu_boot_noc_qreqn_check(vdev, 0x0); 361 - } 362 - 363 - static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev) 364 - { 365 - REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0); 366 - } 367 - 368 - static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable) 369 - { 370 - int ret; 371 - u32 val; 372 - 373 - val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN); 374 - if (enable) 375 - val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 376 - else 377 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 378 - REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val); 379 - 380 - ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 
0x1 : 0x0); 381 - if (ret) { 382 - ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 383 - return ret; 384 - } 385 - 386 - ret = ivpu_boot_noc_qdeny_check(vdev, 0x0); 387 - if (ret) 388 - ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 389 - 390 - return ret; 391 - } 392 - 393 - static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev) 394 - { 395 - return ivpu_boot_host_ss_axi_drive(vdev, true); 396 - } 397 - 398 - static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable) 399 - { 400 - int ret; 401 - u32 val; 402 - 403 - val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN); 404 - if (enable) { 405 - val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val); 406 - val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 407 - } else { 408 - val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val); 409 - val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 410 - } 411 - REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val); 412 - 413 - ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 
0x1 : 0x0); 414 - if (ret) { 415 - ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 416 - return ret; 417 - } 418 - 419 - ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0); 420 - if (ret) 421 - ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 422 - 423 - return ret; 424 - } 425 - 426 - static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev) 427 - { 428 - return ivpu_boot_host_ss_top_noc_drive(vdev, true); 429 - } 430 - 431 - static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable) 432 - { 433 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); 434 - 435 - if (enable) 436 - val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val); 437 - else 438 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val); 439 - 440 - REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val); 441 - } 442 - 443 - static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable) 444 - { 445 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0); 446 - 447 - if (enable) 448 - val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val); 449 - else 450 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val); 451 - 452 - REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val); 453 - } 454 - 455 - static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val) 456 - { 457 - return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, 458 - exp_val, PWR_ISLAND_STATUS_TIMEOUT_US); 459 - } 460 - 461 - static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable) 462 - { 463 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0); 464 - 465 - if (enable) 466 - val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val); 467 - else 468 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val); 469 - 470 - REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val); 471 - } 472 - 473 - static void 
ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable) 474 - { 475 - u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE); 476 - 477 - if (enable) 478 - val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val); 479 - else 480 - val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val); 481 - 482 - REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val); 483 - } 484 - 485 - static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) 486 - { 487 - int ret; 488 - 489 - ivpu_boot_pwr_island_trickle_drive(vdev, true); 490 - ivpu_boot_pwr_island_drive(vdev, true); 491 - 492 - ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1); 493 - if (ret) { 494 - ivpu_err(vdev, "Timed out waiting for power island status\n"); 495 - return ret; 496 - } 497 - 498 - ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0); 499 - if (ret) { 500 - ivpu_err(vdev, "Failed qrenqn check %d\n", ret); 501 - return ret; 502 - } 503 - 504 - ivpu_boot_host_ss_clk_drive(vdev, true); 505 - ivpu_boot_pwr_island_isolation_drive(vdev, false); 506 - ivpu_boot_host_ss_rst_drive(vdev, true); 507 - ivpu_boot_dpu_active_drive(vdev, true); 508 - 509 - return ret; 510 - } 511 - 512 - static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) 513 - { 514 - u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES); 515 - 516 - val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val); 517 - val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); 518 - 519 - if (ivpu_is_force_snoop_enabled(vdev)) 520 - val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); 521 - else 522 - val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); 523 - 524 - REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val); 525 - } 526 - 527 - static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev) 528 - { 529 - u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV); 530 - 531 - val = 
REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); 532 - val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); 533 - val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); 534 - val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); 535 - 536 - REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val); 537 - } 538 - 539 - static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev) 540 - { 541 - u32 val; 542 - 543 - val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC); 544 - val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val); 545 - 546 - val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val); 547 - REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); 548 - 549 - val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); 550 - REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); 551 - 552 - val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); 553 - REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); 554 - 555 - val = vdev->fw->entry_point >> 9; 556 - REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val); 557 - 558 - val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val); 559 - REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val); 560 - 561 - ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n", 562 - vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? 
"cold boot" : "resume"); 563 - } 564 - 565 - static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable) 566 - { 567 - int ret; 568 - u32 val; 569 - 570 - ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 571 - if (ret) { 572 - ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 573 - return ret; 574 - } 575 - 576 - val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL); 577 - if (enable) 578 - val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val); 579 - else 580 - val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val); 581 - REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val); 582 - 583 - ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 584 - if (ret) 585 - ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 586 - 587 - return ret; 588 - } 589 - 590 - static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev) 591 - { 592 - struct ivpu_hw_info *hw = vdev->hw; 593 - 594 - hw->tile_fuse = TILE_FUSE_ENABLE_BOTH; 595 - hw->sku = TILE_SKU_BOTH; 596 - hw->config = WP_CONFIG_2_TILE_4_3_RATIO; 597 - hw->sched_mode = ivpu_sched_mode; 598 - 599 - ivpu_pll_init_frequency_ratios(vdev); 600 - 601 - ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M); 602 - ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M); 603 - ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G); 604 - ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G); 605 - 606 - vdev->platform = IVPU_PLATFORM_SILICON; 607 - ivpu_hw_wa_init(vdev); 608 - ivpu_hw_timeouts_init(vdev); 609 - 610 - return 0; 611 - } 612 - 613 - static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev) 614 - { 615 - int ret; 616 - u32 val; 617 - 618 - if (IVPU_WA(punit_disabled)) 619 - return 0; 620 - 621 - ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 622 - if (ret) { 623 - ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); 624 - return ret; 625 - } 626 - 627 - val = 
REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET); 628 - val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val); 629 - REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val); 630 - 631 - ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 632 - if (ret) 633 - ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 634 - 635 - return ret; 636 - } 637 - 638 - static int ivpu_hw_37xx_reset(struct ivpu_device *vdev) 639 - { 640 - int ret = 0; 641 - 642 - if (ivpu_hw_37xx_ip_reset(vdev)) { 643 - ivpu_err(vdev, "Failed to reset NPU\n"); 644 - ret = -EIO; 645 - } 646 - 647 - if (ivpu_pll_disable(vdev)) { 648 - ivpu_err(vdev, "Failed to disable PLL\n"); 649 - ret = -EIO; 650 - } 651 - 652 - return ret; 653 - } 654 - 655 - static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev) 656 - { 657 - int ret; 658 - 659 - ret = ivpu_boot_d0i3_drive(vdev, true); 660 - if (ret) 661 - ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); 662 - 663 - udelay(5); /* VPU requires 5 us to complete the transition */ 664 - 665 - return ret; 666 - } 667 - 668 - static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev) 669 - { 670 - int ret; 671 - 672 - ret = ivpu_boot_d0i3_drive(vdev, false); 673 - if (ret) 674 - ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); 675 - 676 - return ret; 677 - } 678 - 679 - static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev) 680 - { 681 - int ret; 682 - 683 - /* PLL requests may fail when powering down, so issue WP 0 here */ 684 - ret = ivpu_pll_disable(vdev); 685 - if (ret) 686 - ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret); 687 - 688 - ret = ivpu_hw_37xx_d0i3_disable(vdev); 689 - if (ret) 690 - ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); 691 - 692 - ret = ivpu_pll_enable(vdev); 693 - if (ret) { 694 - ivpu_err(vdev, "Failed to enable PLL: %d\n", ret); 695 - return ret; 696 - } 697 - 698 - ret = ivpu_boot_host_ss_configure(vdev); 699 - if (ret) { 700 - ivpu_err(vdev, "Failed to configure host SS: %d\n", ret); 701 
- return ret; 702 - } 703 - 704 - /* 705 - * The control circuitry for vpu_idle indication logic powers up active. 706 - * To ensure unnecessary low power mode signal from LRT during bring up, 707 - * KMD disables the circuitry prior to bringing up the Main Power island. 708 - */ 709 - ivpu_boot_vpu_idle_gen_disable(vdev); 710 - 711 - ret = ivpu_boot_pwr_domain_enable(vdev); 712 - if (ret) { 713 - ivpu_err(vdev, "Failed to enable power domain: %d\n", ret); 714 - return ret; 715 - } 716 - 717 - ret = ivpu_boot_host_ss_axi_enable(vdev); 718 - if (ret) { 719 - ivpu_err(vdev, "Failed to enable AXI: %d\n", ret); 720 - return ret; 721 - } 722 - 723 - ret = ivpu_boot_host_ss_top_noc_enable(vdev); 724 - if (ret) 725 - ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret); 726 - 727 - return ret; 728 - } 729 - 730 - static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev) 731 - { 732 - ivpu_boot_no_snoop_enable(vdev); 733 - ivpu_boot_tbu_mmu_enable(vdev); 734 - ivpu_boot_soc_cpu_boot(vdev); 735 - 736 - return 0; 737 - } 738 - 739 - static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev) 740 - { 741 - u32 val; 742 - 743 - if (IVPU_WA(punit_disabled)) 744 - return true; 745 - 746 - val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS); 747 - return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) && 748 - REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val); 749 - } 750 - 751 - static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev) 752 - { 753 - return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); 754 - } 755 - 756 - static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev) 757 - { 758 - vdev->hw->d0i3_entry_host_ts = ktime_get_boottime(); 759 - vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT); 760 - } 761 - 762 - static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev) 763 - { 764 - int ret = 0; 765 - 766 - ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev); 767 - 768 - if (!ivpu_hw_37xx_is_idle(vdev)) 
769 - ivpu_warn(vdev, "NPU not idle during power down\n"); 770 - 771 - if (ivpu_hw_37xx_reset(vdev)) { 772 - ivpu_err(vdev, "Failed to reset NPU\n"); 773 - ret = -EIO; 774 - } 775 - 776 - if (ivpu_hw_37xx_d0i3_enable(vdev)) { 777 - ivpu_err(vdev, "Failed to enter D0I3\n"); 778 - ret = -EIO; 779 - } 780 - 781 - return ret; 782 - } 783 - 784 - static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev) 785 - { 786 - u32 val; 787 - 788 - /* Enable writing and set non-zero WDT value */ 789 - REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 790 - REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE); 791 - 792 - /* Enable writing and disable watchdog timer */ 793 - REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 794 - REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0); 795 - 796 - /* Now clear the timeout interrupt */ 797 - val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG); 798 - val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val); 799 - REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val); 800 - } 801 - 802 - static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev) 803 - { 804 - return PLL_PROF_CLK_FREQ; 805 - } 806 - 807 - static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable) 808 - { 809 - /* Profiling freq - is a debug feature. Unavailable on VPU 37XX. 
*/ 810 - } 811 - 812 - static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) 813 - { 814 - u32 pll_clock = PLL_REF_CLK_FREQ * ratio; 815 - u32 cpu_clock; 816 - 817 - if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3) 818 - cpu_clock = pll_clock * 2 / 4; 819 - else 820 - cpu_clock = pll_clock * 2 / 5; 821 - 822 - return cpu_clock; 823 - } 824 - 825 - /* Register indirect accesses */ 826 - static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev) 827 - { 828 - u32 pll_curr_ratio; 829 - 830 - pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL); 831 - pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK; 832 - 833 - if (!ivpu_is_silicon(vdev)) 834 - return PLL_SIMULATION_FREQ; 835 - 836 - return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio); 837 - } 838 - 839 - static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev) 840 - { 841 - return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET); 842 - } 843 - 844 - static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev) 845 - { 846 - return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE); 847 - } 848 - 849 - static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev) 850 - { 851 - return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE); 852 - } 853 - 854 - static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id) 855 - { 856 - u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0; 857 - u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET); 858 - 859 - REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val); 860 - } 861 - 862 - static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev) 863 - { 864 - return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM); 865 - } 866 - 867 - static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev) 868 - { 869 - u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT); 870 - 871 - return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, 
count); 872 - } 873 - 874 - static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) 875 - { 876 - REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr); 877 - } 878 - 879 - static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev) 880 - { 881 - REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK); 882 - } 883 - 884 - static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev) 885 - { 886 - REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK); 887 - REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK); 888 - REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK); 889 - REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0); 890 - } 891 - 892 - static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev) 893 - { 894 - REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1); 895 - REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK); 896 - REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull); 897 - REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0); 898 - } 899 - 900 - static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev) 901 - { 902 - ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ"); 903 - } 904 - 905 - static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev) 906 - { 907 - ivpu_hw_wdt_disable(vdev); 908 - ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ"); 909 - } 910 - 911 - static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev) 912 - { 913 - ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ"); 914 - } 915 - 916 - /* Handler for IRQs from VPU core (irqV) */ 917 - static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread) 918 - { 919 - u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 920 - 921 - if (!status) 922 - return false; 923 - 924 - REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status); 925 - 926 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) 927 - ivpu_mmu_irq_evtq_handler(vdev); 
928 - 929 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 930 - ivpu_ipc_irq_handler(vdev, wake_thread); 931 - 932 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 933 - ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); 934 - 935 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) 936 - ivpu_mmu_irq_gerr_handler(vdev); 937 - 938 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status)) 939 - ivpu_hw_37xx_irq_wdt_mss_handler(vdev); 940 - 941 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) 942 - ivpu_hw_37xx_irq_wdt_nce_handler(vdev); 943 - 944 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) 945 - ivpu_hw_37xx_irq_noc_firewall_handler(vdev); 946 - 947 - return true; 948 - } 949 - 950 - /* Handler for IRQs from Buttress core (irqB) */ 951 - static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq) 952 - { 953 - u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 954 - bool schedule_recovery = false; 955 - 956 - if (!status) 957 - return false; 958 - 959 - if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) 960 - ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", 961 - REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL)); 962 - 963 - if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) { 964 - ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); 965 - REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1); 966 - schedule_recovery = true; 967 - } 968 - 969 - if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) { 970 - u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG); 971 - 972 - ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", 973 - ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log), 974 - REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log), 975 - 
REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log)); 976 - REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1); 977 - schedule_recovery = true; 978 - } 979 - 980 - /* This must be done after interrupts are cleared at the source. */ 981 - if (IVPU_WA(interrupt_clear_with_0)) 982 - /* 983 - * Writing 1 triggers an interrupt, so we can't perform read update write. 984 - * Clear local interrupt status by writing 0 to all bits. 985 - */ 986 - REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0); 987 - else 988 - REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status); 989 - 990 - if (schedule_recovery) 991 - ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); 992 - 993 - return true; 994 - } 995 - 996 - static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr) 997 - { 998 - struct ivpu_device *vdev = ptr; 999 - bool irqv_handled, irqb_handled, wake_thread = false; 1000 - 1001 - REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1); 1002 - 1003 - irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread); 1004 - irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq); 1005 - 1006 - /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ 1007 - REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0); 1008 - 1009 - if (wake_thread) 1010 - return IRQ_WAKE_THREAD; 1011 - if (irqv_handled || irqb_handled) 1012 - return IRQ_HANDLED; 1013 - return IRQ_NONE; 1014 - } 1015 - 1016 - static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev) 1017 - { 1018 - u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 1019 - u32 irqb = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 1020 - 1021 - if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev)) 1022 - ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ"); 1023 - 1024 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv)) 1025 - ivpu_err(vdev, "WDT MSS timeout detected\n"); 1026 - 1027 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv)) 1028 - 
ivpu_err(vdev, "WDT NCE timeout detected\n"); 1029 - 1030 - if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv)) 1031 - ivpu_err(vdev, "NOC Firewall irq detected\n"); 1032 - 1033 - if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, irqb)) 1034 - ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); 1035 - 1036 - if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, irqb)) { 1037 - u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG); 1038 - 1039 - ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", 1040 - ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log), 1041 - REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log), 1042 - REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log)); 1043 - } 1044 - } 1045 - 1046 - const struct ivpu_hw_ops ivpu_hw_37xx_ops = { 1047 - .info_init = ivpu_hw_37xx_info_init, 1048 - .power_up = ivpu_hw_37xx_power_up, 1049 - .is_idle = ivpu_hw_37xx_is_idle, 1050 - .wait_for_idle = ivpu_hw_37xx_wait_for_idle, 1051 - .power_down = ivpu_hw_37xx_power_down, 1052 - .reset = ivpu_hw_37xx_reset, 1053 - .boot_fw = ivpu_hw_37xx_boot_fw, 1054 - .wdt_disable = ivpu_hw_37xx_wdt_disable, 1055 - .diagnose_failure = ivpu_hw_37xx_diagnose_failure, 1056 - .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get, 1057 - .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive, 1058 - .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get, 1059 - .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq, 1060 - .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get, 1061 - .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get, 1062 - .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get, 1063 - .reg_db_set = ivpu_hw_37xx_reg_db_set, 1064 - .reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get, 1065 - .reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get, 1066 - .reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set, 1067 - .irq_clear = 
ivpu_hw_37xx_irq_clear, 1068 - .irq_enable = ivpu_hw_37xx_irq_enable, 1069 - .irq_disable = ivpu_hw_37xx_irq_disable, 1070 - .irq_handler = ivpu_hw_37xx_irq_handler, 1071 - };
-1256
drivers/accel/ivpu/ivpu_hw_40xx.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (C) 2020-2024 Intel Corporation 4 - */ 5 - 6 - #include "ivpu_drv.h" 7 - #include "ivpu_fw.h" 8 - #include "ivpu_hw.h" 9 - #include "ivpu_hw_btrs_lnl_reg.h" 10 - #include "ivpu_hw_40xx_reg.h" 11 - #include "ivpu_hw_reg_io.h" 12 - #include "ivpu_ipc.h" 13 - #include "ivpu_mmu.h" 14 - #include "ivpu_pm.h" 15 - 16 - #include <linux/dmi.h> 17 - 18 - #define TILE_MAX_NUM 6 19 - #define TILE_MAX_MASK 0x3f 20 - 21 - #define LNL_HW_ID 0x4040 22 - 23 - #define SKU_TILE_SHIFT 0u 24 - #define SKU_TILE_MASK 0x0000ffffu 25 - #define SKU_HW_ID_SHIFT 16u 26 - #define SKU_HW_ID_MASK 0xffff0000u 27 - 28 - #define PLL_CONFIG_DEFAULT 0x0 29 - #define PLL_CDYN_DEFAULT 0x80 30 - #define PLL_EPP_DEFAULT 0x80 31 - #define PLL_REF_CLK_FREQ (50 * 1000000) 32 - #define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) 33 - 34 - #define PLL_PROFILING_FREQ_DEFAULT 38400000 35 - #define PLL_PROFILING_FREQ_HIGH 400000000 36 - 37 - #define TIM_SAFE_ENABLE 0xf1d0dead 38 - #define TIM_WATCHDOG_RESET_VALUE 0xffffffff 39 - 40 - #define TIMEOUT_US (150 * USEC_PER_MSEC) 41 - #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) 42 - #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) 43 - #define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) 44 - 45 - #define WEIGHTS_DEFAULT 0xf711f711u 46 - #define WEIGHTS_ATS_DEFAULT 0x0000f711u 47 - 48 - #define ICB_0_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ 49 - (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ 50 - (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ 51 - (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ 52 - (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ 53 - (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ 54 - (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) 55 - 56 - #define ICB_1_IRQ_MASK ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ 57 - 
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ 58 - (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) 59 - 60 - #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) 61 - 62 - #define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \ 63 - (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \ 64 - (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \ 65 - (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \ 66 - (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \ 67 - (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR))) 68 - 69 - #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) 70 - #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) 71 - 72 - #define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ 73 - (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ 74 - (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ 75 - (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ 76 - (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ 77 - (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ 78 - (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) 79 - 80 - static char *ivpu_platform_to_str(u32 platform) 81 - { 82 - switch (platform) { 83 - case IVPU_PLATFORM_SILICON: 84 - return "SILICON"; 85 - case IVPU_PLATFORM_SIMICS: 86 - return "SIMICS"; 87 - case IVPU_PLATFORM_FPGA: 88 - return "FPGA"; 89 - default: 90 - return "Invalid platform"; 91 - } 92 - } 93 - 94 - static const struct dmi_system_id ivpu_dmi_platform_simulation[] = { 95 - { 96 - .ident = "Intel Simics", 97 - .matches = { 98 - DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"), 99 - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 100 - DMI_MATCH(DMI_BOARD_SERIAL, "123456789"), 101 - }, 102 - }, 103 - { 104 - .ident = "Intel Simics", 105 - .matches = { 106 - DMI_MATCH(DMI_BOARD_NAME, "Simics"), 107 - }, 108 - }, 109 - { } 110 - }; 111 - 112 - static void ivpu_hw_read_platform(struct ivpu_device 
*vdev) 113 - { 114 - if (dmi_check_system(ivpu_dmi_platform_simulation)) 115 - vdev->platform = IVPU_PLATFORM_SIMICS; 116 - else 117 - vdev->platform = IVPU_PLATFORM_SILICON; 118 - 119 - ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", 120 - ivpu_platform_to_str(vdev->platform), vdev->platform); 121 - } 122 - 123 - static void ivpu_hw_wa_init(struct ivpu_device *vdev) 124 - { 125 - vdev->wa.punit_disabled = ivpu_is_fpga(vdev); 126 - vdev->wa.clear_runtime_mem = false; 127 - 128 - if (ivpu_hw_gen(vdev) == IVPU_HW_40XX) 129 - vdev->wa.disable_clock_relinquish = true; 130 - 131 - IVPU_PRINT_WA(punit_disabled); 132 - IVPU_PRINT_WA(clear_runtime_mem); 133 - IVPU_PRINT_WA(disable_clock_relinquish); 134 - } 135 - 136 - static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) 137 - { 138 - if (ivpu_is_fpga(vdev)) { 139 - vdev->timeout.boot = 100000; 140 - vdev->timeout.jsm = 50000; 141 - vdev->timeout.tdr = 2000000; 142 - vdev->timeout.reschedule_suspend = 1000; 143 - vdev->timeout.autosuspend = -1; 144 - vdev->timeout.d0i3_entry_msg = 500; 145 - } else if (ivpu_is_simics(vdev)) { 146 - vdev->timeout.boot = 50; 147 - vdev->timeout.jsm = 500; 148 - vdev->timeout.tdr = 10000; 149 - vdev->timeout.reschedule_suspend = 10; 150 - vdev->timeout.autosuspend = -1; 151 - vdev->timeout.d0i3_entry_msg = 100; 152 - } else { 153 - vdev->timeout.boot = 1000; 154 - vdev->timeout.jsm = 500; 155 - vdev->timeout.tdr = 2000; 156 - vdev->timeout.reschedule_suspend = 10; 157 - vdev->timeout.autosuspend = 10; 158 - vdev->timeout.d0i3_entry_msg = 5; 159 - } 160 - } 161 - 162 - static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev) 163 - { 164 - return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 165 - } 166 - 167 - static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio, 168 - u16 target_ratio, u16 epp, u16 config, u16 cdyn) 169 - { 170 - int ret; 171 - u32 val; 172 - 173 - ret = ivpu_pll_wait_for_cmd_send(vdev); 174 - if (ret) { 175 
- ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret); 176 - return ret; 177 - } 178 - 179 - val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0); 180 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val); 181 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val); 182 - REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val); 183 - 184 - val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1); 185 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val); 186 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, epp, val); 187 - REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val); 188 - 189 - val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2); 190 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, config, val); 191 - val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, cdyn, val); 192 - REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val); 193 - 194 - val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD); 195 - val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val); 196 - REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val); 197 - 198 - ret = ivpu_pll_wait_for_cmd_send(vdev); 199 - if (ret) 200 - ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret); 201 - 202 - return ret; 203 - } 204 - 205 - static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev) 206 - { 207 - return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, 1, PLL_TIMEOUT_US); 208 - } 209 - 210 - static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev) 211 - { 212 - if (ivpu_is_simics(vdev)) 213 - return 0; 214 - 215 - return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US); 216 - } 217 - 218 - static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev) 219 - { 220 - struct ivpu_hw_info *hw = vdev->hw; 221 - u8 fuse_min_ratio, fuse_pn_ratio, fuse_max_ratio; 222 - u32 fmin_fuse, fmax_fuse; 223 - 224 - fmin_fuse = 
REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE); 225 - fuse_min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse); 226 - fuse_pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse); 227 - 228 - fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE); 229 - fuse_max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse); 230 - 231 - hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio); 232 - hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio); 233 - hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 234 - } 235 - 236 - static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable) 237 - { 238 - u16 config = enable ? PLL_CONFIG_DEFAULT : 0; 239 - u16 cdyn = enable ? PLL_CDYN_DEFAULT : 0; 240 - u16 epp = enable ? PLL_EPP_DEFAULT : 0; 241 - struct ivpu_hw_info *hw = vdev->hw; 242 - u16 target_ratio = hw->pll.pn_ratio; 243 - int ret; 244 - 245 - ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, epp: 0x%x, config: 0x%x, cdyn: 0x%x\n", 246 - PLL_RATIO_TO_FREQ(target_ratio), epp, config, cdyn); 247 - 248 - ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, 249 - target_ratio, epp, config, cdyn); 250 - if (ret) { 251 - ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret); 252 - return ret; 253 - } 254 - 255 - if (enable) { 256 - ret = ivpu_pll_wait_for_status_ready(vdev); 257 - if (ret) { 258 - ivpu_err(vdev, "Timed out waiting for PLL ready status\n"); 259 - return ret; 260 - } 261 - } 262 - 263 - return 0; 264 - } 265 - 266 - static int ivpu_pll_enable(struct ivpu_device *vdev) 267 - { 268 - return ivpu_pll_drive(vdev, true); 269 - } 270 - 271 - static int ivpu_pll_disable(struct ivpu_device *vdev) 272 - { 273 - return ivpu_pll_drive(vdev, false); 274 - } 275 - 276 - static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable) 277 - { 278 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN); 279 
- 280 - if (enable) { 281 - val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 282 - val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 283 - val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 284 - } else { 285 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 286 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 287 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 288 - } 289 - 290 - REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val); 291 - } 292 - 293 - static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable) 294 - { 295 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN); 296 - 297 - if (enable) { 298 - val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 299 - val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 300 - val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 301 - } else { 302 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 303 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 304 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 305 - } 306 - 307 - REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val); 308 - } 309 - 310 - static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) 311 - { 312 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 313 - 314 - if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) 315 - return -EIO; 316 - 317 - return 0; 318 - } 319 - 320 - static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 321 - { 322 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN); 323 - 324 - if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) 325 - return -EIO; 326 - 327 - return 0; 328 - } 329 - 330 - static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 331 - { 332 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY); 333 - 334 - if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, 
TOP_SOCMMIO, exp_val, val)) 335 - return -EIO; 336 - 337 - return 0; 338 - } 339 - 340 - static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val) 341 - { 342 - u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 343 - 344 - if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || 345 - !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val)) 346 - return -EIO; 347 - 348 - return 0; 349 - } 350 - 351 - static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 352 - { 353 - u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN); 354 - 355 - if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || 356 - !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) 357 - return -EIO; 358 - 359 - return 0; 360 - } 361 - 362 - static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 363 - { 364 - u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY); 365 - 366 - if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || 367 - !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) 368 - return -EIO; 369 - 370 - return 0; 371 - } 372 - 373 - static void ivpu_boot_idle_gen_drive(struct ivpu_device *vdev, bool enable) 374 - { 375 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN); 376 - 377 - if (enable) 378 - val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 379 - else 380 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 381 - 382 - REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val); 383 - } 384 - 385 - static int ivpu_boot_host_ss_check(struct ivpu_device *vdev) 386 - { 387 - int ret; 388 - 389 - ret = ivpu_boot_noc_qreqn_check(vdev, 0x0); 390 - if (ret) { 391 - ivpu_err(vdev, "Failed qreqn check: %d\n", ret); 392 - return ret; 393 - } 394 - 395 - ret = ivpu_boot_noc_qacceptn_check(vdev, 0x0); 396 - if (ret) { 397 - ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 398 - return ret; 399 - } 400 - 401 - ret = 
ivpu_boot_noc_qdeny_check(vdev, 0x0); 402 - if (ret) 403 - ivpu_err(vdev, "Failed qdeny check %d\n", ret); 404 - 405 - return ret; 406 - } 407 - 408 - static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable) 409 - { 410 - int ret; 411 - u32 val; 412 - 413 - val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 414 - if (enable) 415 - val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 416 - else 417 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 418 - REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val); 419 - 420 - ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0); 421 - if (ret) { 422 - ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 423 - return ret; 424 - } 425 - 426 - ret = ivpu_boot_noc_qdeny_check(vdev, 0x0); 427 - if (ret) { 428 - ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 429 - return ret; 430 - } 431 - 432 - if (enable) { 433 - REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT); 434 - REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT); 435 - } 436 - 437 - return ret; 438 - } 439 - 440 - static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev) 441 - { 442 - return ivpu_boot_host_ss_axi_drive(vdev, true); 443 - } 444 - 445 - static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable) 446 - { 447 - int ret; 448 - u32 val; 449 - 450 - val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 451 - if (enable) { 452 - val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 453 - val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 454 - } else { 455 - val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 456 - val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 457 - } 458 - REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val); 459 - 460 - ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 
0x1 : 0x0); 461 - if (ret) { 462 - ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 463 - return ret; 464 - } 465 - 466 - ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0); 467 - if (ret) 468 - ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 469 - 470 - return ret; 471 - } 472 - 473 - static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev) 474 - { 475 - return ivpu_boot_host_ss_top_noc_drive(vdev, true); 476 - } 477 - 478 - static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable) 479 - { 480 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); 481 - 482 - if (enable) 483 - val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 484 - else 485 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 486 - 487 - REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val); 488 - 489 - if (enable) 490 - ndelay(500); 491 - } 492 - 493 - static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable) 494 - { 495 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0); 496 - 497 - if (enable) 498 - val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 499 - else 500 - val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 501 - 502 - REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val); 503 - 504 - if (!enable) 505 - ndelay(500); 506 - } 507 - 508 - static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val) 509 - { 510 - if (ivpu_is_fpga(vdev)) 511 - return 0; 512 - 513 - return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, 514 - exp_val, PWR_ISLAND_STATUS_TIMEOUT_US); 515 - } 516 - 517 - static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable) 518 - { 519 - u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0); 520 - 521 - if (enable) 522 - val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 523 - else 524 - val = 
REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 525 - 526 - REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val); 527 - } 528 - 529 - static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev) 530 - { 531 - u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); 532 - 533 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); 534 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); 535 - 536 - if (ivpu_is_force_snoop_enabled(vdev)) 537 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); 538 - else 539 - val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); 540 - 541 - REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); 542 - } 543 - 544 - static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev) 545 - { 546 - u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV); 547 - 548 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); 549 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); 550 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val); 551 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val); 552 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); 553 - val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); 554 - 555 - REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val); 556 - } 557 - 558 - static int ivpu_boot_cpu_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 559 - { 560 - u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN); 561 - 562 - if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val)) 563 - return -EIO; 564 - 565 - return 0; 566 - } 567 - 568 - static int ivpu_boot_cpu_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 569 - { 570 - u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY); 571 - 572 - if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val)) 573 - 
return -EIO; 574 - 575 - return 0; 576 - } 577 - 578 - static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev) 579 - { 580 - int ret; 581 - 582 - ret = ivpu_wait_for_clock_own_resource_ack(vdev); 583 - if (ret) { 584 - ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n"); 585 - return ret; 586 - } 587 - 588 - ivpu_boot_pwr_island_trickle_drive(vdev, true); 589 - ivpu_boot_pwr_island_drive(vdev, true); 590 - 591 - ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1); 592 - if (ret) { 593 - ivpu_err(vdev, "Timed out waiting for power island status\n"); 594 - return ret; 595 - } 596 - 597 - ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0); 598 - if (ret) { 599 - ivpu_err(vdev, "Failed qrenqn check %d\n", ret); 600 - return ret; 601 - } 602 - 603 - ivpu_boot_host_ss_clk_drive(vdev, true); 604 - ivpu_boot_host_ss_rst_drive(vdev, true); 605 - ivpu_boot_pwr_island_isolation_drive(vdev, false); 606 - 607 - return ret; 608 - } 609 - 610 - static int ivpu_boot_soc_cpu_drive(struct ivpu_device *vdev, bool enable) 611 - { 612 - int ret; 613 - u32 val; 614 - 615 - val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN); 616 - if (enable) 617 - val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 618 - else 619 - val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 620 - REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val); 621 - 622 - ret = ivpu_boot_cpu_noc_qacceptn_check(vdev, enable ? 
0x1 : 0x0); 623 - if (ret) { 624 - ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 625 - return ret; 626 - } 627 - 628 - ret = ivpu_boot_cpu_noc_qdeny_check(vdev, 0x0); 629 - if (ret) 630 - ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 631 - 632 - return ret; 633 - } 634 - 635 - static int ivpu_boot_soc_cpu_enable(struct ivpu_device *vdev) 636 - { 637 - return ivpu_boot_soc_cpu_drive(vdev, true); 638 - } 639 - 640 - static int ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev) 641 - { 642 - int ret; 643 - u32 val; 644 - u64 val64; 645 - 646 - ret = ivpu_boot_soc_cpu_enable(vdev); 647 - if (ret) { 648 - ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret); 649 - return ret; 650 - } 651 - 652 - val64 = vdev->fw->entry_point; 653 - val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1; 654 - REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64); 655 - 656 - val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO); 657 - val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val); 658 - REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val); 659 - 660 - ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n", 661 - ivpu_fw_is_cold_boot(vdev) ? 
"cold boot" : "resume"); 662 - 663 - return 0; 664 - } 665 - 666 - static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable) 667 - { 668 - int ret; 669 - u32 val; 670 - 671 - ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 672 - if (ret) { 673 - ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 674 - return ret; 675 - } 676 - 677 - val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL); 678 - if (enable) 679 - val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val); 680 - else 681 - val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val); 682 - REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val); 683 - 684 - ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 685 - if (ret) { 686 - ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 687 - return ret; 688 - } 689 - 690 - return 0; 691 - } 692 - 693 - static bool ivpu_tile_disable_check(u32 config) 694 - { 695 - /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */ 696 - if (config == 0) 697 - return true; 698 - 699 - if (config > BIT(TILE_MAX_NUM - 1)) 700 - return false; 701 - 702 - if ((config & (config - 1)) == 0) 703 - return true; 704 - 705 - return false; 706 - } 707 - 708 - static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev) 709 - { 710 - struct ivpu_hw_info *hw = vdev->hw; 711 - u32 tile_disable; 712 - u32 fuse; 713 - 714 - fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE); 715 - if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) { 716 - ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse); 717 - return -EIO; 718 - } 719 - 720 - tile_disable = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse); 721 - if (!ivpu_tile_disable_check(tile_disable)) { 722 - ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", tile_disable); 723 - return -EIO; 724 - } 725 - 726 - if (tile_disable) 727 - ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. 
Tile number %d disabled\n", 728 - TILE_MAX_NUM - 1, ffs(tile_disable) - 1); 729 - else 730 - ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM); 731 - 732 - hw->sched_mode = ivpu_sched_mode; 733 - hw->tile_fuse = tile_disable; 734 - hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 735 - 736 - ivpu_pll_init_frequency_ratios(vdev); 737 - 738 - ivpu_hw_init_range(&vdev->hw->ranges.global, 0x80000000, SZ_512M); 739 - ivpu_hw_init_range(&vdev->hw->ranges.user, 0x80000000, SZ_256M); 740 - ivpu_hw_init_range(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M); 741 - ivpu_hw_init_range(&vdev->hw->ranges.dma, 0x200000000, SZ_8G); 742 - 743 - ivpu_hw_read_platform(vdev); 744 - ivpu_hw_wa_init(vdev); 745 - ivpu_hw_timeouts_init(vdev); 746 - 747 - return 0; 748 - } 749 - 750 - static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev) 751 - { 752 - int ret; 753 - u32 val; 754 - 755 - ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US); 756 - if (ret) { 757 - ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); 758 - return ret; 759 - } 760 - 761 - val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET); 762 - val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val); 763 - REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val); 764 - 765 - ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US); 766 - if (ret) 767 - ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 768 - 769 - return ret; 770 - } 771 - 772 - static int ivpu_hw_40xx_reset(struct ivpu_device *vdev) 773 - { 774 - int ret = 0; 775 - 776 - if (ivpu_hw_40xx_ip_reset(vdev)) { 777 - ivpu_err(vdev, "Failed to reset NPU IP\n"); 778 - ret = -EIO; 779 - } 780 - 781 - if (ivpu_pll_disable(vdev)) { 782 - ivpu_err(vdev, "Failed to disable PLL\n"); 783 - ret = -EIO; 784 - } 785 - 786 - return ret; 787 - } 788 - 789 - static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev) 790 - { 791 - int ret; 792 - 793 - if (IVPU_WA(punit_disabled)) 794 - return 0; 795 - 796 - ret = 
ivpu_boot_d0i3_drive(vdev, true); 797 - if (ret) 798 - ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); 799 - 800 - udelay(5); /* VPU requires 5 us to complete the transition */ 801 - 802 - return ret; 803 - } 804 - 805 - static int ivpu_hw_40xx_d0i3_disable(struct ivpu_device *vdev) 806 - { 807 - int ret; 808 - 809 - if (IVPU_WA(punit_disabled)) 810 - return 0; 811 - 812 - ret = ivpu_boot_d0i3_drive(vdev, false); 813 - if (ret) 814 - ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); 815 - 816 - return ret; 817 - } 818 - 819 - static void ivpu_hw_40xx_profiling_freq_reg_set(struct ivpu_device *vdev) 820 - { 821 - u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 822 - 823 - if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) 824 - val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val); 825 - else 826 - val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val); 827 - 828 - REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val); 829 - } 830 - 831 - static void ivpu_hw_40xx_ats_print(struct ivpu_device *vdev) 832 - { 833 - ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n", 834 - REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? 
"Enable" : "Disable"); 835 - } 836 - 837 - static void ivpu_hw_40xx_clock_relinquish_disable(struct ivpu_device *vdev) 838 - { 839 - u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 840 - 841 - val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val); 842 - REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val); 843 - } 844 - 845 - static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev) 846 - { 847 - int ret; 848 - 849 - ret = ivpu_hw_40xx_d0i3_disable(vdev); 850 - if (ret) 851 - ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); 852 - 853 - ret = ivpu_pll_enable(vdev); 854 - if (ret) { 855 - ivpu_err(vdev, "Failed to enable PLL: %d\n", ret); 856 - return ret; 857 - } 858 - 859 - if (IVPU_WA(disable_clock_relinquish)) 860 - ivpu_hw_40xx_clock_relinquish_disable(vdev); 861 - ivpu_hw_40xx_profiling_freq_reg_set(vdev); 862 - ivpu_hw_40xx_ats_print(vdev); 863 - 864 - ret = ivpu_boot_host_ss_check(vdev); 865 - if (ret) { 866 - ivpu_err(vdev, "Failed to configure host SS: %d\n", ret); 867 - return ret; 868 - } 869 - 870 - ivpu_boot_idle_gen_drive(vdev, false); 871 - 872 - ret = ivpu_boot_pwr_domain_enable(vdev); 873 - if (ret) { 874 - ivpu_err(vdev, "Failed to enable power domain: %d\n", ret); 875 - return ret; 876 - } 877 - 878 - ret = ivpu_boot_host_ss_axi_enable(vdev); 879 - if (ret) { 880 - ivpu_err(vdev, "Failed to enable AXI: %d\n", ret); 881 - return ret; 882 - } 883 - 884 - ret = ivpu_boot_host_ss_top_noc_enable(vdev); 885 - if (ret) 886 - ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret); 887 - 888 - return ret; 889 - } 890 - 891 - static int ivpu_hw_40xx_boot_fw(struct ivpu_device *vdev) 892 - { 893 - int ret; 894 - 895 - ivpu_boot_no_snoop_enable(vdev); 896 - ivpu_boot_tbu_mmu_enable(vdev); 897 - 898 - ret = ivpu_boot_soc_cpu_boot(vdev); 899 - if (ret) 900 - ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret); 901 - 902 - return ret; 903 - } 904 - 905 - static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev) 906 - { 907 - u32 val; 908 - 909 
- if (IVPU_WA(punit_disabled)) 910 - return true; 911 - 912 - val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 913 - return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) && 914 - REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val); 915 - } 916 - 917 - static int ivpu_hw_40xx_wait_for_idle(struct ivpu_device *vdev) 918 - { 919 - return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); 920 - } 921 - 922 - static void ivpu_hw_40xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev) 923 - { 924 - vdev->hw->d0i3_entry_host_ts = ktime_get_boottime(); 925 - vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT); 926 - } 927 - 928 - static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev) 929 - { 930 - int ret = 0; 931 - 932 - ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev); 933 - 934 - if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev)) 935 - ivpu_warn(vdev, "Failed to reset the NPU\n"); 936 - 937 - if (ivpu_pll_disable(vdev)) { 938 - ivpu_err(vdev, "Failed to disable PLL\n"); 939 - ret = -EIO; 940 - } 941 - 942 - if (ivpu_hw_40xx_d0i3_enable(vdev)) { 943 - ivpu_err(vdev, "Failed to enter D0I3\n"); 944 - ret = -EIO; 945 - } 946 - 947 - return ret; 948 - } 949 - 950 - static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev) 951 - { 952 - u32 val; 953 - 954 - REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 955 - REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE); 956 - 957 - REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 958 - REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0); 959 - 960 - val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG); 961 - val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val); 962 - REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val); 963 - } 964 - 965 - static u32 ivpu_hw_40xx_profiling_freq_get(struct ivpu_device *vdev) 966 - { 967 - return vdev->hw->pll.profiling_freq; 968 - } 969 - 970 - static void ivpu_hw_40xx_profiling_freq_drive(struct ivpu_device 
*vdev, bool enable) 971 - { 972 - if (enable) 973 - vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH; 974 - else 975 - vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 976 - } 977 - 978 - /* Register indirect accesses */ 979 - static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev) 980 - { 981 - u32 pll_curr_ratio; 982 - 983 - pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ); 984 - pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK; 985 - 986 - return PLL_RATIO_TO_FREQ(pll_curr_ratio); 987 - } 988 - 989 - static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) 990 - { 991 - return PLL_RATIO_TO_FREQ(ratio); 992 - } 993 - 994 - static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev) 995 - { 996 - return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET); 997 - } 998 - 999 - static u32 ivpu_hw_40xx_reg_telemetry_size_get(struct ivpu_device *vdev) 1000 - { 1001 - return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE); 1002 - } 1003 - 1004 - static u32 ivpu_hw_40xx_reg_telemetry_enable_get(struct ivpu_device *vdev) 1005 - { 1006 - return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE); 1007 - } 1008 - 1009 - static void ivpu_hw_40xx_reg_db_set(struct ivpu_device *vdev, u32 db_id) 1010 - { 1011 - u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0; 1012 - u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET); 1013 - 1014 - REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val); 1015 - } 1016 - 1017 - static u32 ivpu_hw_40xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev) 1018 - { 1019 - return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM); 1020 - } 1021 - 1022 - static u32 ivpu_hw_40xx_reg_ipc_rx_count_get(struct ivpu_device *vdev) 1023 - { 1024 - u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT); 1025 - 1026 - return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count); 1027 - } 1028 - 1029 - static void ivpu_hw_40xx_reg_ipc_tx_set(struct ivpu_device 
*vdev, u32 vpu_addr) 1030 - { 1031 - REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr); 1032 - } 1033 - 1034 - static void ivpu_hw_40xx_irq_clear(struct ivpu_device *vdev) 1035 - { 1036 - REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK); 1037 - } 1038 - 1039 - static void ivpu_hw_40xx_irq_enable(struct ivpu_device *vdev) 1040 - { 1041 - REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK); 1042 - REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK); 1043 - REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK); 1044 - REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0); 1045 - } 1046 - 1047 - static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev) 1048 - { 1049 - REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1); 1050 - REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK); 1051 - REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull); 1052 - REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul); 1053 - } 1054 - 1055 - static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev) 1056 - { 1057 - /* TODO: For LNN hang consider engine reset instead of full recovery */ 1058 - ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ"); 1059 - } 1060 - 1061 - static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev) 1062 - { 1063 - ivpu_hw_wdt_disable(vdev); 1064 - ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ"); 1065 - } 1066 - 1067 - static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev) 1068 - { 1069 - ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ"); 1070 - } 1071 - 1072 - /* Handler for IRQs from VPU core (irqV) */ 1073 - static bool ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread) 1074 - { 1075 - u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 1076 - 1077 - if (!status) 1078 - return false; 1079 - 1080 - REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status); 1081 - 1082 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, 
status)) 1083 - ivpu_mmu_irq_evtq_handler(vdev); 1084 - 1085 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 1086 - ivpu_ipc_irq_handler(vdev, wake_thread); 1087 - 1088 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 1089 - ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); 1090 - 1091 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) 1092 - ivpu_mmu_irq_gerr_handler(vdev); 1093 - 1094 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status)) 1095 - ivpu_hw_40xx_irq_wdt_mss_handler(vdev); 1096 - 1097 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) 1098 - ivpu_hw_40xx_irq_wdt_nce_handler(vdev); 1099 - 1100 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) 1101 - ivpu_hw_40xx_irq_noc_firewall_handler(vdev); 1102 - 1103 - return true; 1104 - } 1105 - 1106 - /* Handler for IRQs from Buttress core (irqB) */ 1107 - static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) 1108 - { 1109 - bool schedule_recovery = false; 1110 - u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 1111 - 1112 - if (!status) 1113 - return false; 1114 - 1115 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) 1116 - ivpu_dbg(vdev, IRQ, "FREQ_CHANGE"); 1117 - 1118 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) { 1119 - ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 1120 - REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1), 1121 - REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2)); 1122 - REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1); 1123 - schedule_recovery = true; 1124 - } 1125 - 1126 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) { 1127 - ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); 1128 - REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1); 1129 - schedule_recovery = true; 1130 - } 1131 - 1132 - if 
(REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) { 1133 - ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); 1134 - REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1); 1135 - schedule_recovery = true; 1136 - } 1137 - 1138 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) { 1139 - ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x", 1140 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW), 1141 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH)); 1142 - REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1); 1143 - schedule_recovery = true; 1144 - } 1145 - 1146 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) { 1147 - ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x", 1148 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW), 1149 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH)); 1150 - REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1); 1151 - schedule_recovery = true; 1152 - } 1153 - 1154 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) { 1155 - ivpu_err(vdev, "Survivability error detected\n"); 1156 - schedule_recovery = true; 1157 - } 1158 - 1159 - /* This must be done after interrupts are cleared at the source. 
*/ 1160 - REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status); 1161 - 1162 - if (schedule_recovery) 1163 - ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); 1164 - 1165 - return true; 1166 - } 1167 - 1168 - static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr) 1169 - { 1170 - bool irqv_handled, irqb_handled, wake_thread = false; 1171 - struct ivpu_device *vdev = ptr; 1172 - 1173 - REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1); 1174 - 1175 - irqv_handled = ivpu_hw_40xx_irqv_handler(vdev, irq, &wake_thread); 1176 - irqb_handled = ivpu_hw_40xx_irqb_handler(vdev, irq); 1177 - 1178 - /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ 1179 - REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0); 1180 - 1181 - if (wake_thread) 1182 - return IRQ_WAKE_THREAD; 1183 - if (irqv_handled || irqb_handled) 1184 - return IRQ_HANDLED; 1185 - return IRQ_NONE; 1186 - } 1187 - 1188 - static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev) 1189 - { 1190 - u32 irqv = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; 1191 - u32 irqb = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; 1192 - 1193 - if (ivpu_hw_40xx_reg_ipc_rx_count_get(vdev)) 1194 - ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ"); 1195 - 1196 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv)) 1197 - ivpu_err(vdev, "WDT MSS timeout detected\n"); 1198 - 1199 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv)) 1200 - ivpu_err(vdev, "WDT NCE timeout detected\n"); 1201 - 1202 - if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv)) 1203 - ivpu_err(vdev, "NOC Firewall irq detected\n"); 1204 - 1205 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, irqb)) { 1206 - ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 1207 - REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1), 1208 - REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2)); 1209 - } 1210 - 1211 - if 
(REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, irqb)) 1212 - ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); 1213 - 1214 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, irqb)) 1215 - ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); 1216 - 1217 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, irqb)) 1218 - ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n", 1219 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW), 1220 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH)); 1221 - 1222 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, irqb)) 1223 - ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n", 1224 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW), 1225 - REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH)); 1226 - 1227 - if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, irqb)) 1228 - ivpu_err(vdev, "Survivability error detected\n"); 1229 - } 1230 - 1231 - const struct ivpu_hw_ops ivpu_hw_40xx_ops = { 1232 - .info_init = ivpu_hw_40xx_info_init, 1233 - .power_up = ivpu_hw_40xx_power_up, 1234 - .is_idle = ivpu_hw_40xx_is_idle, 1235 - .wait_for_idle = ivpu_hw_40xx_wait_for_idle, 1236 - .power_down = ivpu_hw_40xx_power_down, 1237 - .reset = ivpu_hw_40xx_reset, 1238 - .boot_fw = ivpu_hw_40xx_boot_fw, 1239 - .wdt_disable = ivpu_hw_40xx_wdt_disable, 1240 - .diagnose_failure = ivpu_hw_40xx_diagnose_failure, 1241 - .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get, 1242 - .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive, 1243 - .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get, 1244 - .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq, 1245 - .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get, 1246 - .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get, 1247 - .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get, 1248 - .reg_db_set = ivpu_hw_40xx_reg_db_set, 1249 - .reg_ipc_rx_addr_get = 
ivpu_hw_40xx_reg_ipc_rx_addr_get, 1250 - .reg_ipc_rx_count_get = ivpu_hw_40xx_reg_ipc_rx_count_get, 1251 - .reg_ipc_tx_set = ivpu_hw_40xx_reg_ipc_tx_set, 1252 - .irq_clear = ivpu_hw_40xx_irq_clear, 1253 - .irq_enable = ivpu_hw_40xx_irq_enable, 1254 - .irq_disable = ivpu_hw_40xx_irq_disable, 1255 - .irq_handler = ivpu_hw_40xx_irq_handler, 1256 - };
+881
drivers/accel/ivpu/ivpu_hw_btrs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2020-2024 Intel Corporation 4 + */ 5 + 6 + #include "ivpu_drv.h" 7 + #include "ivpu_hw.h" 8 + #include "ivpu_hw_btrs.h" 9 + #include "ivpu_hw_btrs_lnl_reg.h" 10 + #include "ivpu_hw_btrs_mtl_reg.h" 11 + #include "ivpu_hw_reg_io.h" 12 + #include "ivpu_pm.h" 13 + 14 + #define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \ 15 + (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR))) 16 + 17 + #define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \ 18 + (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \ 19 + (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \ 20 + (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \ 21 + (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \ 22 + (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR))) 23 + 24 + #define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \ 25 + FREQ_CHANGE))) 26 + 27 + #define BTRS_IRQ_DISABLE_MASK ((u32)-1) 28 + 29 + #define BTRS_LNL_ALL_IRQ_MASK ((u32)-1) 30 + 31 + #define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3) 32 + #define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3) 33 + #define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3) 34 + #define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3) 35 + #define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0) 36 + 37 + #define PLL_CDYN_DEFAULT 0x80 38 + #define PLL_EPP_DEFAULT 0x80 39 + #define PLL_CONFIG_DEFAULT 0x0 40 + #define PLL_SIMULATION_FREQ 10000000 41 + #define PLL_REF_CLK_FREQ 50000000 42 + #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) 43 + #define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) 44 + #define TIMEOUT_US (150 * USEC_PER_MSEC) 45 + 46 + /* Work point configuration values */ 47 + #define WP_CONFIG(tile, ratio) (((tile) << 8) | 
(ratio)) 48 + #define MTL_CONFIG_1_TILE 0x01 49 + #define MTL_CONFIG_2_TILE 0x02 50 + #define MTL_PLL_RATIO_5_3 0x01 51 + #define MTL_PLL_RATIO_4_3 0x02 52 + #define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0 53 + #define BTRS_MTL_TILE_SKU_BOTH 0x3630 54 + 55 + #define BTRS_LNL_TILE_MAX_NUM 6 56 + #define BTRS_LNL_TILE_MAX_MASK 0x3f 57 + 58 + #define WEIGHTS_DEFAULT 0xf711f711u 59 + #define WEIGHTS_ATS_DEFAULT 0x0000f711u 60 + 61 + #define DCT_REQ 0x2 62 + #define DCT_ENABLE 0x1 63 + #define DCT_DISABLE 0x0 64 + 65 + int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev) 66 + { 67 + REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK); 68 + if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) { 69 + /* Writing 1s does not clear the interrupt status register */ 70 + REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0); 71 + return true; 72 + } 73 + 74 + return false; 75 + } 76 + 77 + static void freq_ratios_init_mtl(struct ivpu_device *vdev) 78 + { 79 + struct ivpu_hw_info *hw = vdev->hw; 80 + u32 fmin_fuse, fmax_fuse; 81 + 82 + fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE); 83 + hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse); 84 + hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse); 85 + 86 + fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE); 87 + hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse); 88 + } 89 + 90 + static void freq_ratios_init_lnl(struct ivpu_device *vdev) 91 + { 92 + struct ivpu_hw_info *hw = vdev->hw; 93 + u32 fmin_fuse, fmax_fuse; 94 + 95 + fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE); 96 + hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse); 97 + hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse); 98 + 99 + fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE); 100 + hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse); 101 + } 102 + 103 + void 
ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev) 104 + { 105 + struct ivpu_hw_info *hw = vdev->hw; 106 + 107 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 108 + freq_ratios_init_mtl(vdev); 109 + else 110 + freq_ratios_init_lnl(vdev); 111 + 112 + hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 113 + hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 114 + hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio); 115 + } 116 + 117 + static bool tile_disable_check(u32 config) 118 + { 119 + /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */ 120 + if (config == 0) 121 + return true; 122 + 123 + if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1)) 124 + return false; 125 + 126 + if ((config & (config - 1)) == 0) 127 + return true; 128 + 129 + return false; 130 + } 131 + 132 + static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config) 133 + { 134 + u32 fuse; 135 + u32 config; 136 + 137 + fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE); 138 + if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) { 139 + ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse); 140 + return -EIO; 141 + } 142 + 143 + config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse); 144 + if (!tile_disable_check(config)) { 145 + ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config); 146 + return -EIO; 147 + } 148 + 149 + if (config) 150 + ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. 
Tile number %d disabled\n", 151 + BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1); 152 + else 153 + ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM); 154 + 155 + *tile_fuse_config = config; 156 + return 0; 157 + } 158 + 159 + static int info_init_mtl(struct ivpu_device *vdev) 160 + { 161 + struct ivpu_hw_info *hw = vdev->hw; 162 + 163 + hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH; 164 + hw->sku = BTRS_MTL_TILE_SKU_BOTH; 165 + hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO; 166 + hw->sched_mode = ivpu_sched_mode; 167 + 168 + return 0; 169 + } 170 + 171 + static int info_init_lnl(struct ivpu_device *vdev) 172 + { 173 + struct ivpu_hw_info *hw = vdev->hw; 174 + u32 tile_fuse_config; 175 + int ret; 176 + 177 + ret = read_tile_config_fuse(vdev, &tile_fuse_config); 178 + if (ret) 179 + return ret; 180 + 181 + hw->sched_mode = ivpu_sched_mode; 182 + hw->tile_fuse = tile_fuse_config; 183 + hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; 184 + 185 + return 0; 186 + } 187 + 188 + int ivpu_hw_btrs_info_init(struct ivpu_device *vdev) 189 + { 190 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 191 + return info_init_mtl(vdev); 192 + else 193 + return info_init_lnl(vdev); 194 + } 195 + 196 + static int wp_request_sync(struct ivpu_device *vdev) 197 + { 198 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 199 + return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 200 + else 201 + return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US); 202 + } 203 + 204 + static int wait_for_status_ready(struct ivpu_device *vdev, bool enable) 205 + { 206 + u32 exp_val = enable ? 
0x1 : 0x0; 207 + 208 + if (IVPU_WA(punit_disabled)) 209 + return 0; 210 + 211 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 212 + return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US); 213 + else 214 + return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US); 215 + } 216 + 217 + struct wp_request { 218 + u16 min; 219 + u16 max; 220 + u16 target; 221 + u16 cfg; 222 + u16 epp; 223 + u16 cdyn; 224 + }; 225 + 226 + static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp) 227 + { 228 + u32 val; 229 + 230 + val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0); 231 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val); 232 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val); 233 + REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val); 234 + 235 + val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1); 236 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val); 237 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val); 238 + REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val); 239 + 240 + val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2); 241 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val); 242 + REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val); 243 + 244 + val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD); 245 + val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val); 246 + REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val); 247 + } 248 + 249 + static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp) 250 + { 251 + u32 val; 252 + 253 + val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0); 254 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val); 255 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val); 256 + REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val); 257 + 258 + val = 
REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1); 259 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val); 260 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val); 261 + REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val); 262 + 263 + val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2); 264 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val); 265 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val); 266 + REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val); 267 + 268 + val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD); 269 + val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val); 270 + REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val); 271 + } 272 + 273 + static void wp_request(struct ivpu_device *vdev, struct wp_request *wp) 274 + { 275 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 276 + wp_request_mtl(vdev, wp); 277 + else 278 + wp_request_lnl(vdev, wp); 279 + } 280 + 281 + static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp) 282 + { 283 + int ret; 284 + 285 + ret = wp_request_sync(vdev); 286 + if (ret) { 287 + ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret); 288 + return ret; 289 + } 290 + 291 + wp_request(vdev, wp); 292 + 293 + ret = wp_request_sync(vdev); 294 + if (ret) 295 + ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret); 296 + 297 + return ret; 298 + } 299 + 300 + static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable) 301 + { 302 + struct ivpu_hw_info *hw = vdev->hw; 303 + 304 + wp->min = hw->pll.min_ratio; 305 + wp->max = hw->pll.max_ratio; 306 + 307 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 308 + wp->target = enable ? hw->pll.pn_ratio : 0; 309 + wp->cfg = enable ? hw->config : 0; 310 + wp->cdyn = 0; 311 + wp->epp = 0; 312 + } else { 313 + wp->target = hw->pll.pn_ratio; 314 + wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0; 315 + wp->cdyn = enable ? 
PLL_CDYN_DEFAULT : 0; 316 + wp->epp = enable ? PLL_EPP_DEFAULT : 0; 317 + } 318 + 319 + /* Simics cannot start without at least one tile */ 320 + if (enable && ivpu_is_simics(vdev)) 321 + wp->cfg = 1; 322 + } 323 + 324 + static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable) 325 + { 326 + u32 exp_val = enable ? 0x1 : 0x0; 327 + 328 + if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL) 329 + return 0; 330 + 331 + if (IVPU_WA(punit_disabled)) 332 + return 0; 333 + 334 + return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); 335 + } 336 + 337 + int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) 338 + { 339 + struct wp_request wp; 340 + int ret; 341 + 342 + if (IVPU_WA(punit_disabled)) { 343 + ivpu_dbg(vdev, PM, "Skipping workpoint request\n"); 344 + return 0; 345 + } 346 + 347 + prepare_wp_request(vdev, &wp, enable); 348 + 349 + ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n", 350 + PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn); 351 + 352 + ret = wp_request_send(vdev, &wp); 353 + if (ret) { 354 + ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret); 355 + return ret; 356 + } 357 + 358 + ret = wait_for_pll_lock(vdev, enable); 359 + if (ret) { 360 + ivpu_err(vdev, "Timed out waiting for PLL lock\n"); 361 + return ret; 362 + } 363 + 364 + ret = wait_for_status_ready(vdev, enable); 365 + if (ret) { 366 + ivpu_err(vdev, "Timed out waiting for NPU ready status\n"); 367 + return ret; 368 + } 369 + 370 + return 0; 371 + } 372 + 373 + static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable) 374 + { 375 + int ret; 376 + u32 val; 377 + 378 + ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 379 + if (ret) { 380 + ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 381 + return ret; 382 + } 383 + 384 + val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL); 385 + if (enable) 386 + val = 
REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val); 387 + else 388 + val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val); 389 + REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val); 390 + 391 + ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 392 + if (ret) 393 + ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 394 + 395 + return ret; 396 + } 397 + 398 + static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable) 399 + { 400 + int ret; 401 + u32 val; 402 + 403 + ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 404 + if (ret) { 405 + ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret); 406 + return ret; 407 + } 408 + 409 + val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL); 410 + if (enable) 411 + val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val); 412 + else 413 + val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val); 414 + REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val); 415 + 416 + ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US); 417 + if (ret) { 418 + ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret); 419 + return ret; 420 + } 421 + 422 + return 0; 423 + } 424 + 425 + static int d0i3_drive(struct ivpu_device *vdev, bool enable) 426 + { 427 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 428 + return d0i3_drive_mtl(vdev, enable); 429 + else 430 + return d0i3_drive_lnl(vdev, enable); 431 + } 432 + 433 + int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev) 434 + { 435 + int ret; 436 + 437 + if (IVPU_WA(punit_disabled)) 438 + return 0; 439 + 440 + ret = d0i3_drive(vdev, true); 441 + if (ret) 442 + ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret); 443 + 444 + udelay(5); /* VPU requires 5 us to complete the transition */ 445 + 446 + return ret; 447 + } 448 + 449 + int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev) 450 + { 451 + int ret; 452 + 453 + if (IVPU_WA(punit_disabled)) 454 + return 0; 455 + 456 + 
ret = d0i3_drive(vdev, false); 457 + if (ret) 458 + ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret); 459 + 460 + return ret; 461 + } 462 + 463 + int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev) 464 + { 465 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 466 + return 0; 467 + 468 + if (ivpu_is_simics(vdev)) 469 + return 0; 470 + 471 + return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US); 472 + } 473 + 474 + void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev) 475 + { 476 + REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT); 477 + REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT); 478 + } 479 + 480 + static int ip_reset_mtl(struct ivpu_device *vdev) 481 + { 482 + int ret; 483 + u32 val; 484 + 485 + ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 486 + if (ret) { 487 + ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n"); 488 + return ret; 489 + } 490 + 491 + val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET); 492 + val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val); 493 + REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val); 494 + 495 + ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US); 496 + if (ret) 497 + ivpu_err(vdev, "Timed out waiting for RESET completion\n"); 498 + 499 + return ret; 500 + } 501 + 502 + static int ip_reset_lnl(struct ivpu_device *vdev) 503 + { 504 + int ret; 505 + u32 val; 506 + 507 + ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US); 508 + if (ret) { 509 + ivpu_err(vdev, "Wait for *_TRIGGER timed out\n"); 510 + return ret; 511 + } 512 + 513 + val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET); 514 + val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val); 515 + REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val); 516 + 517 + ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US); 518 + if (ret) 519 + ivpu_err(vdev, "Timed out waiting for RESET 
completion\n"); 520 + 521 + return ret; 522 + } 523 + 524 + int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev) 525 + { 526 + if (IVPU_WA(punit_disabled)) 527 + return 0; 528 + 529 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 530 + return ip_reset_mtl(vdev); 531 + else 532 + return ip_reset_lnl(vdev); 533 + } 534 + 535 + void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev) 536 + { 537 + u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 538 + 539 + if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) 540 + val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val); 541 + else 542 + val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val); 543 + 544 + REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val); 545 + } 546 + 547 + void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev) 548 + { 549 + ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n", 550 + REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable"); 551 + } 552 + 553 + void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev) 554 + { 555 + u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 556 + 557 + val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val); 558 + REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val); 559 + } 560 + 561 + bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev) 562 + { 563 + u32 val; 564 + 565 + if (IVPU_WA(punit_disabled)) 566 + return true; 567 + 568 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 569 + val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS); 570 + 571 + return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) && 572 + REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val); 573 + } else { 574 + val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS); 575 + 576 + return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) && 577 + REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val); 578 + } 579 + } 580 + 581 + int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev) 582 + { 583 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 584 + return 
REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); 585 + else 586 + return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); 587 + } 588 + 589 + /* Handler for IRQs from Buttress core (irqB) */ 590 + bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq) 591 + { 592 + u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK; 593 + bool schedule_recovery = false; 594 + 595 + if (!status) 596 + return false; 597 + 598 + if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) 599 + ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", 600 + REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL)); 601 + 602 + if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) { 603 + ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); 604 + REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1); 605 + schedule_recovery = true; 606 + } 607 + 608 + if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) { 609 + u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG); 610 + 611 + ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", 612 + ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log), 613 + REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log), 614 + REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log)); 615 + REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1); 616 + schedule_recovery = true; 617 + } 618 + 619 + /* This must be done after interrupts are cleared at the source. */ 620 + if (IVPU_WA(interrupt_clear_with_0)) 621 + /* 622 + * Writing 1 triggers an interrupt, so we can't perform read update write. 623 + * Clear local interrupt status by writing 0 to all bits. 
624 + */ 625 + REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0); 626 + else 627 + REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status); 628 + 629 + if (schedule_recovery) 630 + ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); 631 + 632 + return true; 633 + } 634 + 635 + /* Handler for IRQs from Buttress core (irqB) */ 636 + bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq) 637 + { 638 + u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK; 639 + bool schedule_recovery = false; 640 + 641 + if (!status) 642 + return false; 643 + 644 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) 645 + ivpu_dbg(vdev, IRQ, "Survivability IRQ\n"); 646 + 647 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) 648 + ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ)); 649 + 650 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) { 651 + ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 652 + REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1), 653 + REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2)); 654 + REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1); 655 + schedule_recovery = true; 656 + } 657 + 658 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) { 659 + ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); 660 + REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1); 661 + schedule_recovery = true; 662 + } 663 + 664 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) { 665 + ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); 666 + REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1); 667 + schedule_recovery = true; 668 + } 669 + 670 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) { 671 + ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x", 672 + REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW), 673 + REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH)); 674 + 
REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1); 675 + schedule_recovery = true; 676 + } 677 + 678 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) { 679 + ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x", 680 + REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW), 681 + REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH)); 682 + REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1); 683 + schedule_recovery = true; 684 + } 685 + 686 + /* This must be done after interrupts are cleared at the source. */ 687 + REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status); 688 + 689 + if (schedule_recovery) 690 + ivpu_pm_trigger_recovery(vdev, "Buttress IRQ"); 691 + 692 + return true; 693 + } 694 + 695 + static void dct_drive_40xx(struct ivpu_device *vdev, u32 dct_val) 696 + { 697 + u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX); 698 + 699 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, CMD, DCT_REQ, val); 700 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, PARAM1, 701 + dct_val ? 
DCT_ENABLE : DCT_DISABLE, val); 702 + val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, PARAM2, dct_val, val); 703 + 704 + REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX, val); 705 + } 706 + 707 + void ivpu_hw_btrs_dct_drive(struct ivpu_device *vdev, u32 dct_val) 708 + { 709 + return dct_drive_40xx(vdev, dct_val); 710 + } 711 + 712 + static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config) 713 + { 714 + u32 pll_clock = PLL_REF_CLK_FREQ * ratio; 715 + u32 cpu_clock; 716 + 717 + if ((config & 0xff) == MTL_PLL_RATIO_4_3) 718 + cpu_clock = pll_clock * 2 / 4; 719 + else 720 + cpu_clock = pll_clock * 2 / 5; 721 + 722 + return cpu_clock; 723 + } 724 + 725 + u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio) 726 + { 727 + struct ivpu_hw_info *hw = vdev->hw; 728 + 729 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 730 + return pll_ratio_to_freq_mtl(ratio, hw->config); 731 + else 732 + return PLL_RATIO_TO_FREQ(ratio); 733 + } 734 + 735 + static u32 pll_freq_get_mtl(struct ivpu_device *vdev) 736 + { 737 + u32 pll_curr_ratio; 738 + 739 + pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL); 740 + pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK; 741 + 742 + if (!ivpu_is_silicon(vdev)) 743 + return PLL_SIMULATION_FREQ; 744 + 745 + return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config); 746 + } 747 + 748 + static u32 pll_freq_get_lnl(struct ivpu_device *vdev) 749 + { 750 + u32 pll_curr_ratio; 751 + 752 + pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ); 753 + pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK; 754 + 755 + return PLL_RATIO_TO_FREQ(pll_curr_ratio); 756 + } 757 + 758 + u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev) 759 + { 760 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 761 + return pll_freq_get_mtl(vdev); 762 + else 763 + return pll_freq_get_lnl(vdev); 764 + } 765 + 766 + u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev) 767 + { 768 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 769 + return 
REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET); 770 + else 771 + return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET); 772 + } 773 + 774 + u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev) 775 + { 776 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 777 + return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE); 778 + else 779 + return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE); 780 + } 781 + 782 + u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev) 783 + { 784 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 785 + return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE); 786 + else 787 + return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE); 788 + } 789 + 790 + void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev) 791 + { 792 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 793 + REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1); 794 + else 795 + REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1); 796 + } 797 + 798 + void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev) 799 + { 800 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 801 + REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0); 802 + else 803 + REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0); 804 + } 805 + 806 + void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev) 807 + { 808 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 809 + REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK)); 810 + REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0); 811 + } else { 812 + REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK)); 813 + REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0); 814 + } 815 + } 816 + 817 + void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev) 818 + { 819 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) { 820 + REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1); 821 + REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK); 822 + } else { 823 + REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1); 824 + 
REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK); 825 + } 826 + } 827 + 828 + static void diagnose_failure_mtl(struct ivpu_device *vdev) 829 + { 830 + u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK; 831 + 832 + if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg)) 833 + ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0)); 834 + 835 + if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) { 836 + u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG); 837 + 838 + ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx", 839 + log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log), 840 + REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log), 841 + REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log)); 842 + } 843 + } 844 + 845 + static void diagnose_failure_lnl(struct ivpu_device *vdev) 846 + { 847 + u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK; 848 + 849 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) { 850 + ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n", 851 + REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1), 852 + REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2)); 853 + } 854 + 855 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg)) 856 + ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG)); 857 + 858 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg)) 859 + ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG)); 860 + 861 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg)) 862 + ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n", 863 + REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW), 864 + REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH)); 865 + 866 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg)) 867 + ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n", 868 + 
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW), 869 + REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH)); 870 + 871 + if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg)) 872 + ivpu_err(vdev, "Survivability IRQ\n"); 873 + } 874 + 875 + void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev) 876 + { 877 + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) 878 + return diagnose_failure_mtl(vdev); 879 + else 880 + return diagnose_failure_lnl(vdev); 881 + }
+46
drivers/accel/ivpu/ivpu_hw_btrs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020-2024 Intel Corporation 4 + */ 5 + 6 + #ifndef __IVPU_HW_BTRS_H__ 7 + #define __IVPU_HW_BTRS_H__ 8 + 9 + #include "ivpu_drv.h" 10 + #include "ivpu_hw_37xx_reg.h" 11 + #include "ivpu_hw_40xx_reg.h" 12 + #include "ivpu_hw_reg_io.h" 13 + 14 + #define PLL_PROFILING_FREQ_DEFAULT 38400000 15 + #define PLL_PROFILING_FREQ_HIGH 400000000 16 + #define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ) 17 + 18 + int ivpu_hw_btrs_info_init(struct ivpu_device *vdev); 19 + void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev); 20 + int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev); 21 + int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable); 22 + int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev); 23 + int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev); 24 + int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev); 25 + void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev); 26 + bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev); 27 + int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev); 28 + int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev); 29 + void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev); 30 + void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev); 31 + void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev); 32 + bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); 33 + bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); 34 + void ivpu_hw_btrs_dct_drive(struct ivpu_device *vdev, u32 dct_val); 35 + u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev); 36 + u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio); 37 + u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev); 38 + u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev); 39 + u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev); 40 + 
void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev); 41 + void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev); 42 + void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev); 43 + void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev); 44 + void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev); 45 + 46 + #endif /* __IVPU_HW_BTRS_H__ */
+1174
drivers/accel/ivpu/ivpu_hw_ip.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2020-2024 Intel Corporation 4 + */ 5 + 6 + #include "ivpu_drv.h" 7 + #include "ivpu_fw.h" 8 + #include "ivpu_hw.h" 9 + #include "ivpu_hw_37xx_reg.h" 10 + #include "ivpu_hw_40xx_reg.h" 11 + #include "ivpu_hw_ip.h" 12 + #include "ivpu_hw_reg_io.h" 13 + #include "ivpu_mmu.h" 14 + #include "ivpu_pm.h" 15 + 16 + #define PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT 0 17 + #define PWR_ISLAND_EN_POST_DLY_FREQ_HIGH 18 18 + #define PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT 3 19 + #define PWR_ISLAND_STATUS_DLY_FREQ_HIGH 46 20 + #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) 21 + 22 + #define TIM_SAFE_ENABLE 0xf1d0dead 23 + #define TIM_WATCHDOG_RESET_VALUE 0xffffffff 24 + 25 + #define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ 26 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ 27 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ 28 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ 29 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ 30 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ 31 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) 32 + 33 + #define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ 34 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ 35 + (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) 36 + 37 + #define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX) 38 + 39 + #define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ 40 + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \ 41 + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \ 42 + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \ 43 + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \ 44 + 
(REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \ 45 + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT))) 46 + 47 + #define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \ 48 + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \ 49 + (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT))) 50 + 51 + #define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX) 52 + 53 + #define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ 54 + (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ 55 + (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ 56 + (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ 57 + (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ 58 + (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ 59 + (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) 60 + 61 + #define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \ 62 + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \ 63 + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \ 64 + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \ 65 + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \ 66 + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \ 67 + (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX))) 68 + 69 + static int wait_for_ip_bar(struct ivpu_device *vdev) 70 + { 71 + return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100); 72 + } 73 + 74 + static void host_ss_rst_clr(struct ivpu_device *vdev) 75 + { 76 + u32 val = 0; 77 + 78 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val); 79 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val); 80 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val); 81 + 82 + REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val); 83 + } 84 + 85 + static int 
host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val) 86 + { 87 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN); 88 + 89 + if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) 90 + return -EIO; 91 + 92 + return 0; 93 + } 94 + 95 + static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val) 96 + { 97 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 98 + 99 + if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val)) 100 + return -EIO; 101 + 102 + return 0; 103 + } 104 + 105 + static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) 106 + { 107 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 108 + return host_ss_noc_qreqn_check_37xx(vdev, exp_val); 109 + else 110 + return host_ss_noc_qreqn_check_40xx(vdev, exp_val); 111 + } 112 + 113 + static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val) 114 + { 115 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN); 116 + 117 + if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) 118 + return -EIO; 119 + 120 + return 0; 121 + } 122 + 123 + static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val) 124 + { 125 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN); 126 + 127 + if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val)) 128 + return -EIO; 129 + 130 + return 0; 131 + } 132 + 133 + static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 134 + { 135 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 136 + return host_ss_noc_qacceptn_check_37xx(vdev, exp_val); 137 + else 138 + return host_ss_noc_qacceptn_check_40xx(vdev, exp_val); 139 + } 140 + 141 + static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val) 142 + { 143 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY); 144 + 145 + if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val)) 146 + return -EIO; 147 + 148 + 
return 0; 149 + } 150 + 151 + static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val) 152 + { 153 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY); 154 + 155 + if (!REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val)) 156 + return -EIO; 157 + 158 + return 0; 159 + } 160 + 161 + static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 162 + { 163 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 164 + return host_ss_noc_qdeny_check_37xx(vdev, exp_val); 165 + else 166 + return host_ss_noc_qdeny_check_40xx(vdev, exp_val); 167 + } 168 + 169 + static int top_noc_qrenqn_check_37xx(struct ivpu_device *vdev, u32 exp_val) 170 + { 171 + u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN); 172 + 173 + if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || 174 + !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val)) 175 + return -EIO; 176 + 177 + return 0; 178 + } 179 + 180 + static int top_noc_qrenqn_check_40xx(struct ivpu_device *vdev, u32 exp_val) 181 + { 182 + u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 183 + 184 + if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) || 185 + !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val)) 186 + return -EIO; 187 + 188 + return 0; 189 + } 190 + 191 + static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val) 192 + { 193 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 194 + return top_noc_qrenqn_check_37xx(vdev, exp_val); 195 + else 196 + return top_noc_qrenqn_check_40xx(vdev, exp_val); 197 + } 198 + 199 + int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev) 200 + { 201 + int ret; 202 + 203 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { 204 + ret = wait_for_ip_bar(vdev); 205 + if (ret) { 206 + ivpu_err(vdev, "Timed out waiting for NPU IP bar\n"); 207 + return ret; 208 + } 209 + host_ss_rst_clr(vdev); 210 + } 211 + 212 + ret = host_ss_noc_qreqn_check(vdev, 0x0); 213 + if (ret) { 214 + 
ivpu_err(vdev, "Failed qreqn check: %d\n", ret); 215 + return ret; 216 + } 217 + 218 + ret = host_ss_noc_qacceptn_check(vdev, 0x0); 219 + if (ret) { 220 + ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 221 + return ret; 222 + } 223 + 224 + ret = host_ss_noc_qdeny_check(vdev, 0x0); 225 + if (ret) 226 + ivpu_err(vdev, "Failed qdeny check %d\n", ret); 227 + 228 + return ret; 229 + } 230 + 231 + static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable) 232 + { 233 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN); 234 + 235 + if (enable) 236 + val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val); 237 + else 238 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val); 239 + 240 + REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, val); 241 + } 242 + 243 + static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable) 244 + { 245 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN); 246 + 247 + if (enable) 248 + val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 249 + else 250 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val); 251 + 252 + REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val); 253 + } 254 + 255 + void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev) 256 + { 257 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 258 + idle_gen_drive_37xx(vdev, true); 259 + else 260 + idle_gen_drive_40xx(vdev, true); 261 + } 262 + 263 + void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev) 264 + { 265 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 266 + idle_gen_drive_37xx(vdev, false); 267 + else 268 + idle_gen_drive_40xx(vdev, false); 269 + } 270 + 271 + static void pwr_island_delay_set_50xx(struct ivpu_device *vdev) 272 + { 273 + u32 val, post, status; 274 + 275 + if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) { 276 + post = PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT; 277 + status = PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT; 278 + } else { 279 + post = PWR_ISLAND_EN_POST_DLY_FREQ_HIGH; 280 + status = 
PWR_ISLAND_STATUS_DLY_FREQ_HIGH; 281 + } 282 + 283 + val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY); 284 + val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val); 285 + REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val); 286 + 287 + val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY); 288 + val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val); 289 + REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val); 290 + } 291 + 292 + static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable) 293 + { 294 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); 295 + 296 + if (enable) 297 + val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val); 298 + else 299 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val); 300 + 301 + REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val); 302 + } 303 + 304 + static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable) 305 + { 306 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0); 307 + 308 + if (enable) 309 + val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 310 + else 311 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val); 312 + 313 + REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val); 314 + 315 + if (enable) 316 + ndelay(500); 317 + } 318 + 319 + static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable) 320 + { 321 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0); 322 + 323 + if (enable) 324 + val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 325 + else 326 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val); 327 + 328 + REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val); 329 + 330 + if (!enable) 331 + ndelay(500); 332 + } 333 + 334 + static void pwr_island_drive_40xx(struct 
ivpu_device *vdev, bool enable) 335 + { 336 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0); 337 + 338 + if (enable) 339 + val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val); 340 + else 341 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val); 342 + 343 + REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val); 344 + } 345 + 346 + static void pwr_island_enable(struct ivpu_device *vdev) 347 + { 348 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { 349 + pwr_island_trickle_drive_37xx(vdev, true); 350 + pwr_island_drive_37xx(vdev, true); 351 + } else { 352 + pwr_island_trickle_drive_40xx(vdev, true); 353 + pwr_island_drive_40xx(vdev, true); 354 + } 355 + } 356 + 357 + static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val) 358 + { 359 + if (IVPU_WA(punit_disabled)) 360 + return 0; 361 + 362 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 363 + return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val, 364 + PWR_ISLAND_STATUS_TIMEOUT_US); 365 + else 366 + return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val, 367 + PWR_ISLAND_STATUS_TIMEOUT_US); 368 + } 369 + 370 + static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable) 371 + { 372 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0); 373 + 374 + if (enable) 375 + val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val); 376 + else 377 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val); 378 + 379 + REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val); 380 + } 381 + 382 + static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable) 383 + { 384 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0); 385 + 386 + if (enable) 387 + val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 388 + else 389 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val); 390 + 391 + REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val); 392 
+ } 393 + 394 + static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable) 395 + { 396 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 397 + pwr_island_isolation_drive_37xx(vdev, enable); 398 + else 399 + pwr_island_isolation_drive_40xx(vdev, enable); 400 + } 401 + 402 + static void pwr_island_isolation_disable(struct ivpu_device *vdev) 403 + { 404 + pwr_island_isolation_drive(vdev, false); 405 + } 406 + 407 + static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable) 408 + { 409 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET); 410 + 411 + if (enable) { 412 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val); 413 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val); 414 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val); 415 + } else { 416 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val); 417 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val); 418 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val); 419 + } 420 + 421 + REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val); 422 + } 423 + 424 + static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable) 425 + { 426 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN); 427 + 428 + if (enable) { 429 + val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 430 + val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 431 + val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 432 + } else { 433 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val); 434 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val); 435 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val); 436 + } 437 + 438 + REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val); 439 + } 440 + 441 + static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable) 442 + { 443 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 444 + host_ss_clk_drive_37xx(vdev, enable); 445 + else 446 + 
host_ss_clk_drive_40xx(vdev, enable); 447 + } 448 + 449 + static void host_ss_clk_enable(struct ivpu_device *vdev) 450 + { 451 + host_ss_clk_drive(vdev, true); 452 + } 453 + 454 + static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable) 455 + { 456 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET); 457 + 458 + if (enable) { 459 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val); 460 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val); 461 + val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val); 462 + } else { 463 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val); 464 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val); 465 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val); 466 + } 467 + 468 + REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val); 469 + } 470 + 471 + static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable) 472 + { 473 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN); 474 + 475 + if (enable) { 476 + val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 477 + val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 478 + val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 479 + } else { 480 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val); 481 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val); 482 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val); 483 + } 484 + 485 + REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val); 486 + } 487 + 488 + static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable) 489 + { 490 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 491 + host_ss_rst_drive_37xx(vdev, enable); 492 + else 493 + host_ss_rst_drive_40xx(vdev, enable); 494 + } 495 + 496 + static void host_ss_rst_enable(struct ivpu_device *vdev) 497 + { 498 + host_ss_rst_drive(vdev, true); 499 + } 500 + 501 + static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable) 502 + 
{ 503 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN); 504 + 505 + if (enable) 506 + val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 507 + else 508 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 509 + REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val); 510 + } 511 + 512 + static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable) 513 + { 514 + u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN); 515 + 516 + if (enable) 517 + val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 518 + else 519 + val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val); 520 + REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val); 521 + } 522 + 523 + static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable) 524 + { 525 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 526 + host_ss_noc_qreqn_top_socmmio_drive_37xx(vdev, enable); 527 + else 528 + host_ss_noc_qreqn_top_socmmio_drive_40xx(vdev, enable); 529 + } 530 + 531 + static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable) 532 + { 533 + int ret; 534 + 535 + host_ss_noc_qreqn_top_socmmio_drive(vdev, enable); 536 + 537 + ret = host_ss_noc_qacceptn_check(vdev, enable ? 
0x1 : 0x0); 538 + if (ret) { 539 + ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret); 540 + return ret; 541 + } 542 + 543 + ret = host_ss_noc_qdeny_check(vdev, 0x0); 544 + if (ret) 545 + ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret); 546 + 547 + return ret; 548 + } 549 + 550 + static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable) 551 + { 552 + u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN); 553 + 554 + if (enable) { 555 + val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 556 + val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 557 + } else { 558 + val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val); 559 + val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 560 + } 561 + 562 + REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val); 563 + } 564 + 565 + static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable) 566 + { 567 + u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN); 568 + 569 + if (enable) { 570 + val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val); 571 + val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 572 + } else { 573 + val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val); 574 + val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val); 575 + } 576 + 577 + REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val); 578 + } 579 + 580 + static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable) 581 + { 582 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 583 + top_noc_qreqn_drive_37xx(vdev, enable); 584 + else 585 + top_noc_qreqn_drive_40xx(vdev, enable); 586 + } 587 + 588 + int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev) 589 + { 590 + return host_ss_axi_drive(vdev, true); 591 + } 592 + 593 + static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val) 594 + { 595 + u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN); 596 + 597 + if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || 598 + 
!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) 599 + return -EIO; 600 + 601 + return 0; 602 + } 603 + 604 + static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val) 605 + { 606 + u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN); 607 + 608 + if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) || 609 + !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val)) 610 + return -EIO; 611 + 612 + return 0; 613 + } 614 + 615 + static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val) 616 + { 617 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 618 + return top_noc_qacceptn_check_37xx(vdev, exp_val); 619 + else 620 + return top_noc_qacceptn_check_40xx(vdev, exp_val); 621 + } 622 + 623 + static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val) 624 + { 625 + u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QDENY); 626 + 627 + if (!REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || 628 + !REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) 629 + return -EIO; 630 + 631 + return 0; 632 + } 633 + 634 + static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val) 635 + { 636 + u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QDENY); 637 + 638 + if (!REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) || 639 + !REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val)) 640 + return -EIO; 641 + 642 + return 0; 643 + } 644 + 645 + static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val) 646 + { 647 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 648 + return top_noc_qdeny_check_37xx(vdev, exp_val); 649 + else 650 + return top_noc_qdeny_check_40xx(vdev, exp_val); 651 + } 652 + 653 + static int top_noc_drive(struct ivpu_device *vdev, bool enable) 654 + { 655 + int ret; 656 + 657 + top_noc_qreqn_drive(vdev, enable); 658 + 659 + ret = top_noc_qacceptn_check(vdev, enable ? 
0x1 : 0x0); 660 + if (ret) { 661 + ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret); 662 + return ret; 663 + } 664 + 665 + ret = top_noc_qdeny_check(vdev, 0x0); 666 + if (ret) 667 + ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret); 668 + 669 + return ret; 670 + } 671 + 672 + int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev) 673 + { 674 + return top_noc_drive(vdev, true); 675 + } 676 + 677 + static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable) 678 + { 679 + u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE); 680 + 681 + if (enable) 682 + val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val); 683 + else 684 + val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val); 685 + 686 + REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val); 687 + } 688 + 689 + int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev) 690 + { 691 + int ret; 692 + 693 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_50XX) 694 + pwr_island_delay_set_50xx(vdev); 695 + 696 + pwr_island_enable(vdev); 697 + 698 + ret = wait_for_pwr_island_status(vdev, 0x1); 699 + if (ret) { 700 + ivpu_err(vdev, "Timed out waiting for power island status\n"); 701 + return ret; 702 + } 703 + 704 + ret = top_noc_qreqn_check(vdev, 0x0); 705 + if (ret) { 706 + ivpu_err(vdev, "Failed TOP NOC QREQN check %d\n", ret); 707 + return ret; 708 + } 709 + 710 + host_ss_clk_enable(vdev); 711 + pwr_island_isolation_disable(vdev); 712 + host_ss_rst_enable(vdev); 713 + 714 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 715 + dpu_active_drive_37xx(vdev, true); 716 + 717 + return ret; 718 + } 719 + 720 + u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev) 721 + { 722 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 723 + return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT); 724 + else 725 + return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT); 726 + } 727 + 728 + static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev) 729 + { 730 + u32 val = 
REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES); 731 + 732 + val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val); 733 + val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val); 734 + 735 + if (ivpu_is_force_snoop_enabled(vdev)) 736 + val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); 737 + else 738 + val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val); 739 + 740 + REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val); 741 + } 742 + 743 + static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev) 744 + { 745 + u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES); 746 + 747 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val); 748 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val); 749 + 750 + if (ivpu_is_force_snoop_enabled(vdev)) 751 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); 752 + else 753 + val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val); 754 + 755 + REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val); 756 + } 757 + 758 + void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev) 759 + { 760 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 761 + return ivpu_hw_ip_snoop_disable_37xx(vdev); 762 + else 763 + return ivpu_hw_ip_snoop_disable_40xx(vdev); 764 + } 765 + 766 + static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev) 767 + { 768 + u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV); 769 + 770 + val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); 771 + val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); 772 + val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); 773 + val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); 774 + 775 + REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val); 776 + } 777 + 778 + static void 
ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev) 779 + { 780 + u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV); 781 + 782 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val); 783 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val); 784 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val); 785 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val); 786 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val); 787 + val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val); 788 + 789 + REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val); 790 + } 791 + 792 + void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev) 793 + { 794 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 795 + return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev); 796 + else 797 + return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev); 798 + } 799 + 800 + static int soc_cpu_boot_37xx(struct ivpu_device *vdev) 801 + { 802 + u32 val; 803 + 804 + val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC); 805 + val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val); 806 + 807 + val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val); 808 + REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); 809 + 810 + val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); 811 + REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); 812 + 813 + val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val); 814 + REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val); 815 + 816 + val = vdev->fw->entry_point >> 9; 817 + REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val); 818 + 819 + val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val); 820 + REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val); 821 + 822 + ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n", 823 + vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? 
"cold boot" : "resume"); 824 + 825 + return 0; 826 + } 827 + 828 + static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val) 829 + { 830 + u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN); 831 + 832 + if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, val)) 833 + return -EIO; 834 + 835 + return 0; 836 + } 837 + 838 + static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val) 839 + { 840 + u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY); 841 + 842 + if (!REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, val)) 843 + return -EIO; 844 + 845 + return 0; 846 + } 847 + 848 + static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable) 849 + { 850 + u32 val = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN); 851 + 852 + if (enable) 853 + val = REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 854 + else 855 + val = REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, val); 856 + REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, val); 857 + } 858 + 859 + static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable) 860 + { 861 + int ret; 862 + 863 + cpu_noc_top_mmio_drive_40xx(vdev, enable); 864 + 865 + ret = cpu_noc_qacceptn_check_40xx(vdev, enable ? 
0x1 : 0x0); 866 + if (ret) { 867 + ivpu_err(vdev, "Failed qacceptn check: %d\n", ret); 868 + return ret; 869 + } 870 + 871 + ret = cpu_noc_qdeny_check_40xx(vdev, 0x0); 872 + if (ret) 873 + ivpu_err(vdev, "Failed qdeny check: %d\n", ret); 874 + 875 + return ret; 876 + } 877 + 878 + static int soc_cpu_enable(struct ivpu_device *vdev) 879 + { 880 + return soc_cpu_drive_40xx(vdev, true); 881 + } 882 + 883 + static int soc_cpu_boot_40xx(struct ivpu_device *vdev) 884 + { 885 + int ret; 886 + u32 val; 887 + u64 val64; 888 + 889 + ret = soc_cpu_enable(vdev); 890 + if (ret) { 891 + ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret); 892 + return ret; 893 + } 894 + 895 + val64 = vdev->fw->entry_point; 896 + val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1; 897 + REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64); 898 + 899 + val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO); 900 + val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val); 901 + REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val); 902 + 903 + ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n", 904 + ivpu_fw_is_cold_boot(vdev) ? 
"cold boot" : "resume"); 905 + 906 + return 0; 907 + } 908 + 909 + int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev) 910 + { 911 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 912 + return soc_cpu_boot_37xx(vdev); 913 + else 914 + return soc_cpu_boot_40xx(vdev); 915 + } 916 + 917 + static void wdt_disable_37xx(struct ivpu_device *vdev) 918 + { 919 + u32 val; 920 + 921 + /* Enable writing and set non-zero WDT value */ 922 + REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 923 + REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE); 924 + 925 + /* Enable writing and disable watchdog timer */ 926 + REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 927 + REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0); 928 + 929 + /* Now clear the timeout interrupt */ 930 + val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG); 931 + val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val); 932 + REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val); 933 + } 934 + 935 + static void wdt_disable_40xx(struct ivpu_device *vdev) 936 + { 937 + u32 val; 938 + 939 + REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 940 + REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE); 941 + 942 + REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE); 943 + REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0); 944 + 945 + val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG); 946 + val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val); 947 + REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val); 948 + } 949 + 950 + void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev) 951 + { 952 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 953 + return wdt_disable_37xx(vdev); 954 + else 955 + return wdt_disable_40xx(vdev); 956 + } 957 + 958 + static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev) 959 + { 960 + u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT); 961 + 962 + return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count); 963 + } 964 + 965 + 
static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev) 966 + { 967 + u32 count = REGV_RD32_SILENT(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT); 968 + 969 + return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count); 970 + } 971 + 972 + u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev) 973 + { 974 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 975 + return ipc_rx_count_get_37xx(vdev); 976 + else 977 + return ipc_rx_count_get_40xx(vdev); 978 + } 979 + 980 + void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev) 981 + { 982 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { 983 + REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX); 984 + REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX); 985 + } else { 986 + REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX); 987 + REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX); 988 + } 989 + } 990 + 991 + void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev) 992 + { 993 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { 994 + REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull); 995 + REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0); 996 + } else { 997 + REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull); 998 + REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul); 999 + } 1000 + } 1001 + 1002 + static void diagnose_failure_37xx(struct ivpu_device *vdev) 1003 + { 1004 + u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX; 1005 + 1006 + if (ipc_rx_count_get_37xx(vdev)) 1007 + ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ"); 1008 + 1009 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg)) 1010 + ivpu_err(vdev, "WDT MSS timeout detected\n"); 1011 + 1012 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg)) 1013 + ivpu_err(vdev, "WDT NCE timeout detected\n"); 1014 + 1015 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg)) 1016 + ivpu_err(vdev, "NOC 
Firewall irq detected\n"); 1017 + } 1018 + 1019 + static void diagnose_failure_40xx(struct ivpu_device *vdev) 1020 + { 1021 + u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX; 1022 + 1023 + if (ipc_rx_count_get_40xx(vdev)) 1024 + ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ"); 1025 + 1026 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg)) 1027 + ivpu_err(vdev, "WDT MSS timeout detected\n"); 1028 + 1029 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg)) 1030 + ivpu_err(vdev, "WDT NCE timeout detected\n"); 1031 + 1032 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg)) 1033 + ivpu_err(vdev, "NOC Firewall irq detected\n"); 1034 + } 1035 + 1036 + void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev) 1037 + { 1038 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 1039 + diagnose_failure_37xx(vdev); 1040 + else 1041 + diagnose_failure_40xx(vdev); 1042 + } 1043 + 1044 + void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev) 1045 + { 1046 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 1047 + REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX); 1048 + else 1049 + REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX); 1050 + } 1051 + 1052 + static void irq_wdt_nce_handler(struct ivpu_device *vdev) 1053 + { 1054 + ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ"); 1055 + } 1056 + 1057 + static void irq_wdt_mss_handler(struct ivpu_device *vdev) 1058 + { 1059 + ivpu_hw_ip_wdt_disable(vdev); 1060 + ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ"); 1061 + } 1062 + 1063 + static void irq_noc_firewall_handler(struct ivpu_device *vdev) 1064 + { 1065 + ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ"); 1066 + } 1067 + 1068 + /* Handler for IRQs from NPU core */ 1069 + bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq, bool *wake_thread) 1070 + { 1071 + u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX; 1072 + 1073 
+ if (!status) 1074 + return false; 1075 + 1076 + REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status); 1077 + 1078 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) 1079 + ivpu_mmu_irq_evtq_handler(vdev); 1080 + 1081 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 1082 + ivpu_ipc_irq_handler(vdev, wake_thread); 1083 + 1084 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 1085 + ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); 1086 + 1087 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) 1088 + ivpu_mmu_irq_gerr_handler(vdev); 1089 + 1090 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status)) 1091 + irq_wdt_mss_handler(vdev); 1092 + 1093 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) 1094 + irq_wdt_nce_handler(vdev); 1095 + 1096 + if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) 1097 + irq_noc_firewall_handler(vdev); 1098 + 1099 + return true; 1100 + } 1101 + 1102 + /* Handler for IRQs from NPU core */ 1103 + bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq, bool *wake_thread) 1104 + { 1105 + u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX; 1106 + 1107 + if (!status) 1108 + return false; 1109 + 1110 + REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status); 1111 + 1112 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) 1113 + ivpu_mmu_irq_evtq_handler(vdev); 1114 + 1115 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) 1116 + ivpu_ipc_irq_handler(vdev, wake_thread); 1117 + 1118 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) 1119 + ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); 1120 + 1121 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status)) 1122 + ivpu_mmu_irq_gerr_handler(vdev); 1123 + 1124 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, 
CPU_INT_REDIRECT_0_INT, status)) 1125 + irq_wdt_mss_handler(vdev); 1126 + 1127 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status)) 1128 + irq_wdt_nce_handler(vdev); 1129 + 1130 + if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) 1131 + irq_noc_firewall_handler(vdev); 1132 + 1133 + return true; 1134 + } 1135 + 1136 + static void db_set_37xx(struct ivpu_device *vdev, u32 db_id) 1137 + { 1138 + u32 reg_stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0; 1139 + u32 val = REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET); 1140 + 1141 + REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val); 1142 + } 1143 + 1144 + static void db_set_40xx(struct ivpu_device *vdev, u32 db_id) 1145 + { 1146 + u32 reg_stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0; 1147 + u32 val = REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET); 1148 + 1149 + REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, reg_stride, db_id, val); 1150 + } 1151 + 1152 + void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id) 1153 + { 1154 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 1155 + db_set_37xx(vdev, db_id); 1156 + else 1157 + db_set_40xx(vdev, db_id); 1158 + } 1159 + 1160 + u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev) 1161 + { 1162 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 1163 + return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM); 1164 + else 1165 + return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM); 1166 + } 1167 + 1168 + void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr) 1169 + { 1170 + if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) 1171 + REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr); 1172 + else 1173 + REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr); 1174 + }
+36
drivers/accel/ivpu/ivpu_hw_ip.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020-2024 Intel Corporation 4 + */ 5 + 6 + #ifndef __IVPU_HW_IP_H__ 7 + #define __IVPU_HW_IP_H__ 8 + 9 + #include "ivpu_drv.h" 10 + 11 + int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev); 12 + void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev); 13 + void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev); 14 + int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev); 15 + int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev); 16 + int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev); 17 + u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev); 18 + void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev); 19 + void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev); 20 + int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev); 21 + void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev); 22 + void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev); 23 + u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev); 24 + void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev); 25 + bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq, bool *wake_thread); 26 + bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq, bool *wake_thread); 27 + void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id); 28 + u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev); 29 + void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr); 30 + void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev); 31 + void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev); 32 + void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev); 33 + void ivpu_hw_ip_fabric_req_override_enable_50xx(struct ivpu_device *vdev); 34 + void ivpu_hw_ip_fabric_req_override_disable_50xx(struct ivpu_device *vdev); 35 + 36 + #endif /* __IVPU_HW_IP_H__ */
+3 -3
drivers/accel/ivpu/ivpu_ipc.c
··· 129 129 130 130 static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr) 131 131 { 132 - ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr); 132 + ivpu_hw_ipc_tx_set(vdev, vpu_addr); 133 133 } 134 134 135 135 static void ··· 392 392 * Driver needs to purge all messages from IPC FIFO to clear IPC interrupt. 393 393 * Without purge IPC FIFO to 0 next IPC interrupts won't be generated. 394 394 */ 395 - while (ivpu_hw_reg_ipc_rx_count_get(vdev)) { 396 - vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev); 395 + while (ivpu_hw_ipc_rx_count_get(vdev)) { 396 + vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev); 397 397 if (vpu_addr == REG_IO_ERROR) { 398 398 ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n"); 399 399 return;
+1 -1
drivers/accel/ivpu/ivpu_job.c
··· 27 27 28 28 static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) 29 29 { 30 - ivpu_hw_reg_db_set(vdev, cmdq->db_id); 30 + ivpu_hw_db_set(vdev, cmdq->db_id); 31 31 } 32 32 33 33 static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,