Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mtk-soc-for-v6.13' of https://git.kernel.org/pub/scm/linux/kernel/git/mediatek/linux into arm/drivers

MediaTek soc driver updates for v6.13

This adds support for the MT8188 SoC in the MediaTek Regulator
Coupler driver, allowing stable GPU DVFS on this chip.

Moreover, this adds a new MediaTek DVFS Resource Collector (DVFSRC)
driver, enabling other drivers (interconnect, regulator) that can
now communicate with the DVFSRC hardware.

Last but not least, this includes some cleanups for the CMDQ Helper
and MediaTek SVS drivers.

* tag 'mtk-soc-for-v6.13' of https://git.kernel.org/pub/scm/linux/kernel/git/mediatek/linux:
soc: mediatek: mtk-svs: Call of_node_put(np) only once in svs_get_subsys_device()
soc: mediatek: mediatek-regulator-coupler: Support mt8188
soc: mediatek: mtk-cmdq: Move cmdq_instruction init to declaration
soc: mediatek: mtk-cmdq: Move mask build and append to function
soc: mediatek: Add MediaTek DVFS Resource Collector (DVFSRC) driver
dt-bindings: soc: mediatek: Add DVFSRC bindings for MT8183 and MT8195

Link: https://lore.kernel.org/r/20241104112625.161365-2-angelogioacchino.delregno@collabora.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

+787 -127
+83
Documentation/devicetree/bindings/soc/mediatek/mediatek,mt8183-dvfsrc.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/soc/mediatek/mediatek,mt8183-dvfsrc.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: MediaTek Dynamic Voltage and Frequency Scaling Resource Collector (DVFSRC) 8 + 9 + description: 10 + The Dynamic Voltage and Frequency Scaling Resource Collector (DVFSRC) is a 11 + Hardware module used to collect all the requests from both software and the 12 + various remote processors embedded into the SoC and decide about a minimum 13 + operating voltage and a minimum DRAM frequency to fulfill those requests in 14 + an effort to provide the best achievable performance per watt. 15 + This hardware IP is capable of transparently performing direct register R/W 16 + on all of the DVFSRC-controlled regulators and SoC bandwidth knobs. 17 + 18 + maintainers: 19 + - AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> 20 + - Henry Chen <henryc.chen@mediatek.com> 21 + 22 + properties: 23 + compatible: 24 + oneOf: 25 + - enum: 26 + - mediatek,mt8183-dvfsrc 27 + - mediatek,mt8195-dvfsrc 28 + - items: 29 + - const: mediatek,mt8192-dvfsrc 30 + - const: mediatek,mt8195-dvfsrc 31 + 32 + reg: 33 + maxItems: 1 34 + description: DVFSRC common register address and length. 
35 + 36 + regulators: 37 + type: object 38 + $ref: /schemas/regulator/mediatek,mt6873-dvfsrc-regulator.yaml# 39 + 40 + interconnect: 41 + type: object 42 + $ref: /schemas/interconnect/mediatek,mt8183-emi.yaml# 43 + 44 + required: 45 + - compatible 46 + - reg 47 + 48 + additionalProperties: false 49 + 50 + examples: 51 + - | 52 + soc { 53 + #address-cells = <2>; 54 + #size-cells = <2>; 55 + 56 + system-controller@10012000 { 57 + compatible = "mediatek,mt8195-dvfsrc"; 58 + reg = <0 0x10012000 0 0x1000>; 59 + 60 + regulators { 61 + compatible = "mediatek,mt8195-dvfsrc-regulator"; 62 + 63 + dvfsrc_vcore: dvfsrc-vcore { 64 + regulator-name = "dvfsrc-vcore"; 65 + regulator-min-microvolt = <550000>; 66 + regulator-max-microvolt = <750000>; 67 + regulator-always-on; 68 + }; 69 + 70 + dvfsrc_vscp: dvfsrc-vscp { 71 + regulator-name = "dvfsrc-vscp"; 72 + regulator-min-microvolt = <550000>; 73 + regulator-max-microvolt = <750000>; 74 + regulator-always-on; 75 + }; 76 + }; 77 + 78 + emi_icc: interconnect { 79 + compatible = "mediatek,mt8195-emi"; 80 + #interconnect-cells = <1>; 81 + }; 82 + }; 83 + };
+11
drivers/soc/mediatek/Kconfig
··· 26 26 The violation information is logged for further analysis or 27 27 countermeasures. 28 28 29 + config MTK_DVFSRC 30 + tristate "MediaTek DVFSRC Support" 31 + depends on ARCH_MEDIATEK 32 + help 33 + Say yes here to add support for the MediaTek Dynamic Voltage 34 + and Frequency Scaling Resource Collector (DVFSRC): a HW 35 + IP found on many MediaTek SoCs, which is responsible for 36 + collecting DVFS requests from various SoC IPs, other than 37 + software, and performing bandwidth scaling to provide the 38 + best achievable performance-per-watt. 39 + 29 40 config MTK_INFRACFG 30 41 bool "MediaTek INFRACFG Support" 31 42 select REGMAP
+1
drivers/soc/mediatek/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o 3 3 obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o 4 + obj-$(CONFIG_MTK_DVFSRC) += mtk-dvfsrc.o 4 5 obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o 5 6 obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o 6 7 obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o
+106 -124
drivers/soc/mediatek/mtk-cmdq-helper.c
··· 180 180 return 0; 181 181 } 182 182 183 + static int cmdq_pkt_mask(struct cmdq_pkt *pkt, u32 mask) 184 + { 185 + struct cmdq_instruction inst = { 186 + .op = CMDQ_CODE_MASK, 187 + .mask = ~mask 188 + }; 189 + return cmdq_pkt_append_command(pkt, inst); 190 + } 191 + 183 192 int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) 184 193 { 185 - struct cmdq_instruction inst; 186 - 187 - inst.op = CMDQ_CODE_WRITE; 188 - inst.value = value; 189 - inst.offset = offset; 190 - inst.subsys = subsys; 191 - 194 + struct cmdq_instruction inst = { 195 + .op = CMDQ_CODE_WRITE, 196 + .value = value, 197 + .offset = offset, 198 + .subsys = subsys 199 + }; 192 200 return cmdq_pkt_append_command(pkt, inst); 193 201 } 194 202 EXPORT_SYMBOL(cmdq_pkt_write); ··· 204 196 int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, 205 197 u16 offset, u32 value, u32 mask) 206 198 { 207 - struct cmdq_instruction inst = { {0} }; 208 199 u16 offset_mask = offset; 209 200 int err; 210 201 211 - if (mask != 0xffffffff) { 212 - inst.op = CMDQ_CODE_MASK; 213 - inst.mask = ~mask; 214 - err = cmdq_pkt_append_command(pkt, inst); 202 + if (mask != GENMASK(31, 0)) { 203 + err = cmdq_pkt_mask(pkt, mask); 215 204 if (err < 0) 216 205 return err; 217 206 218 207 offset_mask |= CMDQ_WRITE_ENABLE_MASK; 219 208 } 220 - err = cmdq_pkt_write(pkt, subsys, offset_mask, value); 221 - 222 - return err; 209 + return cmdq_pkt_write(pkt, subsys, offset_mask, value); 223 210 } 224 211 EXPORT_SYMBOL(cmdq_pkt_write_mask); 225 212 226 213 int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low, 227 214 u16 reg_idx) 228 215 { 229 - struct cmdq_instruction inst = {}; 230 - 231 - inst.op = CMDQ_CODE_READ_S; 232 - inst.dst_t = CMDQ_REG_TYPE; 233 - inst.sop = high_addr_reg_idx; 234 - inst.reg_dst = reg_idx; 235 - inst.src_reg = addr_low; 236 - 216 + struct cmdq_instruction inst = { 217 + .op = CMDQ_CODE_READ_S, 218 + .dst_t = CMDQ_REG_TYPE, 219 + .sop = high_addr_reg_idx, 220 + 
.reg_dst = reg_idx, 221 + .src_reg = addr_low 222 + }; 237 223 return cmdq_pkt_append_command(pkt, inst); 238 224 } 239 225 EXPORT_SYMBOL(cmdq_pkt_read_s); ··· 235 233 int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, 236 234 u16 addr_low, u16 src_reg_idx) 237 235 { 238 - struct cmdq_instruction inst = {}; 239 - 240 - inst.op = CMDQ_CODE_WRITE_S; 241 - inst.src_t = CMDQ_REG_TYPE; 242 - inst.sop = high_addr_reg_idx; 243 - inst.offset = addr_low; 244 - inst.src_reg = src_reg_idx; 245 - 236 + struct cmdq_instruction inst = { 237 + .op = CMDQ_CODE_WRITE_S, 238 + .src_t = CMDQ_REG_TYPE, 239 + .sop = high_addr_reg_idx, 240 + .offset = addr_low, 241 + .src_reg = src_reg_idx 242 + }; 246 243 return cmdq_pkt_append_command(pkt, inst); 247 244 } 248 245 EXPORT_SYMBOL(cmdq_pkt_write_s); ··· 249 248 int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, 250 249 u16 addr_low, u16 src_reg_idx, u32 mask) 251 250 { 252 - struct cmdq_instruction inst = {}; 251 + struct cmdq_instruction inst = { 252 + .op = CMDQ_CODE_WRITE_S_MASK, 253 + .src_t = CMDQ_REG_TYPE, 254 + .sop = high_addr_reg_idx, 255 + .offset = addr_low, 256 + .src_reg = src_reg_idx, 257 + }; 253 258 int err; 254 259 255 - inst.op = CMDQ_CODE_MASK; 256 - inst.mask = ~mask; 257 - err = cmdq_pkt_append_command(pkt, inst); 260 + err = cmdq_pkt_mask(pkt, mask); 258 261 if (err < 0) 259 262 return err; 260 - 261 - inst.mask = 0; 262 - inst.op = CMDQ_CODE_WRITE_S_MASK; 263 - inst.src_t = CMDQ_REG_TYPE; 264 - inst.sop = high_addr_reg_idx; 265 - inst.offset = addr_low; 266 - inst.src_reg = src_reg_idx; 267 263 268 264 return cmdq_pkt_append_command(pkt, inst); 269 265 } ··· 269 271 int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx, 270 272 u16 addr_low, u32 value) 271 273 { 272 - struct cmdq_instruction inst = {}; 273 - 274 - inst.op = CMDQ_CODE_WRITE_S; 275 - inst.sop = high_addr_reg_idx; 276 - inst.offset = addr_low; 277 - inst.value = value; 278 - 274 + struct 
cmdq_instruction inst = { 275 + .op = CMDQ_CODE_WRITE_S, 276 + .sop = high_addr_reg_idx, 277 + .offset = addr_low, 278 + .value = value 279 + }; 279 280 return cmdq_pkt_append_command(pkt, inst); 280 281 } 281 282 EXPORT_SYMBOL(cmdq_pkt_write_s_value); ··· 282 285 int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx, 283 286 u16 addr_low, u32 value, u32 mask) 284 287 { 285 - struct cmdq_instruction inst = {}; 288 + struct cmdq_instruction inst = { 289 + .op = CMDQ_CODE_WRITE_S_MASK, 290 + .sop = high_addr_reg_idx, 291 + .offset = addr_low, 292 + .value = value 293 + }; 286 294 int err; 287 295 288 - inst.op = CMDQ_CODE_MASK; 289 - inst.mask = ~mask; 290 - err = cmdq_pkt_append_command(pkt, inst); 296 + err = cmdq_pkt_mask(pkt, mask); 291 297 if (err < 0) 292 298 return err; 293 - 294 - inst.op = CMDQ_CODE_WRITE_S_MASK; 295 - inst.sop = high_addr_reg_idx; 296 - inst.offset = addr_low; 297 - inst.value = value; 298 299 299 300 return cmdq_pkt_append_command(pkt, inst); 300 301 } ··· 326 331 327 332 int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear) 328 333 { 329 - struct cmdq_instruction inst = { {0} }; 330 334 u32 clear_option = clear ? 
CMDQ_WFE_UPDATE : 0; 335 + struct cmdq_instruction inst = { 336 + .op = CMDQ_CODE_WFE, 337 + .value = CMDQ_WFE_OPTION | clear_option, 338 + .event = event 339 + }; 331 340 332 341 if (event >= CMDQ_MAX_EVENT) 333 342 return -EINVAL; 334 - 335 - inst.op = CMDQ_CODE_WFE; 336 - inst.value = CMDQ_WFE_OPTION | clear_option; 337 - inst.event = event; 338 343 339 344 return cmdq_pkt_append_command(pkt, inst); 340 345 } ··· 342 347 343 348 int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event) 344 349 { 345 - struct cmdq_instruction inst = {}; 350 + struct cmdq_instruction inst = { 351 + .op = CMDQ_CODE_WFE, 352 + .value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT, 353 + .event = event 354 + }; 346 355 347 356 if (event >= CMDQ_MAX_EVENT) 348 357 return -EINVAL; 349 - 350 - inst.op = CMDQ_CODE_WFE; 351 - inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT; 352 - inst.event = event; 353 358 354 359 return cmdq_pkt_append_command(pkt, inst); 355 360 } ··· 357 362 358 363 int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) 359 364 { 360 - struct cmdq_instruction inst = { {0} }; 365 + struct cmdq_instruction inst = { 366 + .op = CMDQ_CODE_WFE, 367 + .value = CMDQ_WFE_UPDATE, 368 + .event = event 369 + }; 361 370 362 371 if (event >= CMDQ_MAX_EVENT) 363 372 return -EINVAL; 364 - 365 - inst.op = CMDQ_CODE_WFE; 366 - inst.value = CMDQ_WFE_UPDATE; 367 - inst.event = event; 368 373 369 374 return cmdq_pkt_append_command(pkt, inst); 370 375 } ··· 372 377 373 378 int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event) 374 379 { 375 - struct cmdq_instruction inst = {}; 380 + struct cmdq_instruction inst = { 381 + .op = CMDQ_CODE_WFE, 382 + .value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE, 383 + .event = event 384 + }; 376 385 377 386 if (event >= CMDQ_MAX_EVENT) 378 387 return -EINVAL; 379 - 380 - inst.op = CMDQ_CODE_WFE; 381 - inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE; 382 - inst.event = event; 383 388 384 389 return 
cmdq_pkt_append_command(pkt, inst); 385 390 } ··· 388 393 int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, 389 394 u16 offset, u32 value) 390 395 { 391 - struct cmdq_instruction inst = { {0} }; 392 - int err; 393 - 394 - inst.op = CMDQ_CODE_POLL; 395 - inst.value = value; 396 - inst.offset = offset; 397 - inst.subsys = subsys; 398 - err = cmdq_pkt_append_command(pkt, inst); 399 - 400 - return err; 396 + struct cmdq_instruction inst = { 397 + .op = CMDQ_CODE_POLL, 398 + .value = value, 399 + .offset = offset, 400 + .subsys = subsys 401 + }; 402 + return cmdq_pkt_append_command(pkt, inst); 401 403 } 402 404 EXPORT_SYMBOL(cmdq_pkt_poll); 403 405 404 406 int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, 405 407 u16 offset, u32 value, u32 mask) 406 408 { 407 - struct cmdq_instruction inst = { {0} }; 408 409 int err; 409 410 410 - inst.op = CMDQ_CODE_MASK; 411 - inst.mask = ~mask; 412 - err = cmdq_pkt_append_command(pkt, inst); 411 + err = cmdq_pkt_mask(pkt, mask); 413 412 if (err < 0) 414 413 return err; 415 414 416 415 offset = offset | CMDQ_POLL_ENABLE_MASK; 417 - err = cmdq_pkt_poll(pkt, subsys, offset, value); 418 - 419 - return err; 416 + return cmdq_pkt_poll(pkt, subsys, offset, value); 420 417 } 421 418 EXPORT_SYMBOL(cmdq_pkt_poll_mask); 422 419 ··· 423 436 * which enables use_mask bit. 
424 437 */ 425 438 if (mask != GENMASK(31, 0)) { 426 - inst.op = CMDQ_CODE_MASK; 427 - inst.mask = ~mask; 428 - ret = cmdq_pkt_append_command(pkt, inst); 439 + ret = cmdq_pkt_mask(pkt, mask); 429 440 if (ret < 0) 430 441 return ret; 431 442 use_mask = CMDQ_POLL_ENABLE_MASK; ··· 462 477 enum cmdq_logic_op s_op, 463 478 struct cmdq_operand *right_operand) 464 479 { 465 - struct cmdq_instruction inst = { {0} }; 480 + struct cmdq_instruction inst; 466 481 467 482 if (!left_operand || !right_operand || s_op >= CMDQ_LOGIC_MAX) 468 483 return -EINVAL; 469 484 485 + inst.value = 0; 470 486 inst.op = CMDQ_CODE_LOGIC; 471 487 inst.dst_t = CMDQ_REG_TYPE; 472 488 inst.src_t = cmdq_operand_get_type(left_operand); ··· 483 497 484 498 int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) 485 499 { 486 - struct cmdq_instruction inst = {}; 487 - 488 - inst.op = CMDQ_CODE_LOGIC; 489 - inst.dst_t = CMDQ_REG_TYPE; 490 - inst.reg_dst = reg_idx; 491 - inst.value = value; 500 + struct cmdq_instruction inst = { 501 + .op = CMDQ_CODE_LOGIC, 502 + .dst_t = CMDQ_REG_TYPE, 503 + .reg_dst = reg_idx, 504 + .value = value 505 + }; 492 506 return cmdq_pkt_append_command(pkt, inst); 493 507 } 494 508 EXPORT_SYMBOL(cmdq_pkt_assign); 495 509 496 510 int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) 497 511 { 498 - struct cmdq_instruction inst = {}; 499 - 500 - inst.op = CMDQ_CODE_JUMP; 501 - inst.offset = CMDQ_JUMP_ABSOLUTE; 502 - inst.value = addr >> shift_pa; 512 + struct cmdq_instruction inst = { 513 + .op = CMDQ_CODE_JUMP, 514 + .offset = CMDQ_JUMP_ABSOLUTE, 515 + .value = addr >> shift_pa 516 + }; 503 517 return cmdq_pkt_append_command(pkt, inst); 504 518 } 505 519 EXPORT_SYMBOL(cmdq_pkt_jump_abs); 506 520 507 521 int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa) 508 522 { 509 - struct cmdq_instruction inst = { {0} }; 510 - 511 - inst.op = CMDQ_CODE_JUMP; 512 - inst.value = (u32)offset >> shift_pa; 523 + struct cmdq_instruction inst = { 524 
+ .op = CMDQ_CODE_JUMP, 525 + .value = (u32)offset >> shift_pa 526 + }; 513 527 return cmdq_pkt_append_command(pkt, inst); 514 528 } 515 529 EXPORT_SYMBOL(cmdq_pkt_jump_rel); 516 530 517 531 int cmdq_pkt_eoc(struct cmdq_pkt *pkt) 518 532 { 519 - struct cmdq_instruction inst = { {0} }; 520 - 521 - inst.op = CMDQ_CODE_EOC; 522 - inst.value = CMDQ_EOC_IRQ_EN; 533 + struct cmdq_instruction inst = { 534 + .op = CMDQ_CODE_EOC, 535 + .value = CMDQ_EOC_IRQ_EN 536 + }; 523 537 return cmdq_pkt_append_command(pkt, inst); 524 538 } 525 539 EXPORT_SYMBOL(cmdq_pkt_eoc); ··· 530 544 int err; 531 545 532 546 /* insert EOC and generate IRQ for each command iteration */ 533 - inst.op = CMDQ_CODE_EOC; 534 - inst.value = CMDQ_EOC_IRQ_EN; 535 - err = cmdq_pkt_append_command(pkt, inst); 547 + err = cmdq_pkt_eoc(pkt); 536 548 if (err < 0) 537 549 return err; 538 550 ··· 538 554 inst.op = CMDQ_CODE_JUMP; 539 555 inst.value = CMDQ_JUMP_PASS >> 540 556 cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan); 541 - err = cmdq_pkt_append_command(pkt, inst); 542 - 543 - return err; 557 + return cmdq_pkt_append_command(pkt, inst); 544 558 } 545 559 EXPORT_SYMBOL(cmdq_pkt_finalize); 546 560
+545
drivers/soc/mediatek/mtk-dvfsrc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2021 MediaTek Inc. 4 + * Copyright (c) 2024 Collabora Ltd. 5 + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> 6 + */ 7 + 8 + #include <linux/arm-smccc.h> 9 + #include <linux/bitfield.h> 10 + #include <linux/iopoll.h> 11 + #include <linux/module.h> 12 + #include <linux/of.h> 13 + #include <linux/of_platform.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/soc/mediatek/dvfsrc.h> 16 + #include <linux/soc/mediatek/mtk_sip_svc.h> 17 + 18 + /* DVFSRC_LEVEL */ 19 + #define DVFSRC_V1_LEVEL_TARGET_LEVEL GENMASK(15, 0) 20 + #define DVFSRC_TGT_LEVEL_IDLE 0x00 21 + #define DVFSRC_V1_LEVEL_CURRENT_LEVEL GENMASK(31, 16) 22 + 23 + /* DVFSRC_SW_REQ, DVFSRC_SW_REQ2 */ 24 + #define DVFSRC_V1_SW_REQ2_DRAM_LEVEL GENMASK(1, 0) 25 + #define DVFSRC_V1_SW_REQ2_VCORE_LEVEL GENMASK(3, 2) 26 + 27 + #define DVFSRC_V2_SW_REQ_DRAM_LEVEL GENMASK(3, 0) 28 + #define DVFSRC_V2_SW_REQ_VCORE_LEVEL GENMASK(6, 4) 29 + 30 + /* DVFSRC_VCORE */ 31 + #define DVFSRC_V2_VCORE_REQ_VSCP_LEVEL GENMASK(14, 12) 32 + 33 + #define DVFSRC_POLL_TIMEOUT_US 1000 34 + #define STARTUP_TIME_US 1 35 + 36 + #define MTK_SIP_DVFSRC_INIT 0x0 37 + #define MTK_SIP_DVFSRC_START 0x1 38 + 39 + struct dvfsrc_bw_constraints { 40 + u16 max_dram_nom_bw; 41 + u16 max_dram_peak_bw; 42 + u16 max_dram_hrt_bw; 43 + }; 44 + 45 + struct dvfsrc_opp { 46 + u32 vcore_opp; 47 + u32 dram_opp; 48 + }; 49 + 50 + struct dvfsrc_opp_desc { 51 + const struct dvfsrc_opp *opps; 52 + u32 num_opp; 53 + }; 54 + 55 + struct dvfsrc_soc_data; 56 + struct mtk_dvfsrc { 57 + struct device *dev; 58 + struct platform_device *icc; 59 + struct platform_device *regulator; 60 + const struct dvfsrc_soc_data *dvd; 61 + const struct dvfsrc_opp_desc *curr_opps; 62 + void __iomem *regs; 63 + int dram_type; 64 + }; 65 + 66 + struct dvfsrc_soc_data { 67 + const int *regs; 68 + const struct dvfsrc_opp_desc *opps_desc; 69 + u32 (*get_target_level)(struct mtk_dvfsrc 
*dvfsrc); 70 + u32 (*get_current_level)(struct mtk_dvfsrc *dvfsrc); 71 + u32 (*get_vcore_level)(struct mtk_dvfsrc *dvfsrc); 72 + u32 (*get_vscp_level)(struct mtk_dvfsrc *dvfsrc); 73 + void (*set_dram_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw); 74 + void (*set_dram_peak_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw); 75 + void (*set_dram_hrt_bw)(struct mtk_dvfsrc *dvfsrc, u64 bw); 76 + void (*set_opp_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 77 + void (*set_vcore_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 78 + void (*set_vscp_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 79 + int (*wait_for_opp_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 80 + int (*wait_for_vcore_level)(struct mtk_dvfsrc *dvfsrc, u32 level); 81 + const struct dvfsrc_bw_constraints *bw_constraints; 82 + }; 83 + 84 + static u32 dvfsrc_readl(struct mtk_dvfsrc *dvfs, u32 offset) 85 + { 86 + return readl(dvfs->regs + dvfs->dvd->regs[offset]); 87 + } 88 + 89 + static void dvfsrc_writel(struct mtk_dvfsrc *dvfs, u32 offset, u32 val) 90 + { 91 + writel(val, dvfs->regs + dvfs->dvd->regs[offset]); 92 + } 93 + 94 + enum dvfsrc_regs { 95 + DVFSRC_SW_REQ, 96 + DVFSRC_SW_REQ2, 97 + DVFSRC_LEVEL, 98 + DVFSRC_TARGET_LEVEL, 99 + DVFSRC_SW_BW, 100 + DVFSRC_SW_PEAK_BW, 101 + DVFSRC_SW_HRT_BW, 102 + DVFSRC_VCORE, 103 + DVFSRC_REGS_MAX, 104 + }; 105 + 106 + static const int dvfsrc_mt8183_regs[] = { 107 + [DVFSRC_SW_REQ] = 0x4, 108 + [DVFSRC_SW_REQ2] = 0x8, 109 + [DVFSRC_LEVEL] = 0xDC, 110 + [DVFSRC_SW_BW] = 0x160, 111 + }; 112 + 113 + static const int dvfsrc_mt8195_regs[] = { 114 + [DVFSRC_SW_REQ] = 0xc, 115 + [DVFSRC_VCORE] = 0x6c, 116 + [DVFSRC_SW_PEAK_BW] = 0x278, 117 + [DVFSRC_SW_BW] = 0x26c, 118 + [DVFSRC_SW_HRT_BW] = 0x290, 119 + [DVFSRC_LEVEL] = 0xd44, 120 + [DVFSRC_TARGET_LEVEL] = 0xd48, 121 + }; 122 + 123 + static const struct dvfsrc_opp *dvfsrc_get_current_opp(struct mtk_dvfsrc *dvfsrc) 124 + { 125 + u32 level = dvfsrc->dvd->get_current_level(dvfsrc); 126 + 127 + return &dvfsrc->curr_opps->opps[level]; 128 + } 129 
+ 130 + static bool dvfsrc_is_idle(struct mtk_dvfsrc *dvfsrc) 131 + { 132 + if (!dvfsrc->dvd->get_target_level) 133 + return true; 134 + 135 + return dvfsrc->dvd->get_target_level(dvfsrc) == DVFSRC_TGT_LEVEL_IDLE; 136 + } 137 + 138 + static int dvfsrc_wait_for_vcore_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level) 139 + { 140 + const struct dvfsrc_opp *curr; 141 + 142 + return readx_poll_timeout_atomic(dvfsrc_get_current_opp, dvfsrc, curr, 143 + curr->vcore_opp >= level, STARTUP_TIME_US, 144 + DVFSRC_POLL_TIMEOUT_US); 145 + } 146 + 147 + static int dvfsrc_wait_for_opp_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level) 148 + { 149 + const struct dvfsrc_opp *target, *curr; 150 + int ret; 151 + 152 + target = &dvfsrc->curr_opps->opps[level]; 153 + ret = readx_poll_timeout_atomic(dvfsrc_get_current_opp, dvfsrc, curr, 154 + curr->dram_opp >= target->dram_opp && 155 + curr->vcore_opp >= target->vcore_opp, 156 + STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US); 157 + if (ret < 0) { 158 + dev_warn(dvfsrc->dev, 159 + "timeout! target OPP: %u, dram: %d, vcore: %d\n", level, 160 + curr->dram_opp, curr->vcore_opp); 161 + return ret; 162 + } 163 + 164 + return 0; 165 + } 166 + 167 + static int dvfsrc_wait_for_opp_level_v2(struct mtk_dvfsrc *dvfsrc, u32 level) 168 + { 169 + const struct dvfsrc_opp *target, *curr; 170 + int ret; 171 + 172 + target = &dvfsrc->curr_opps->opps[level]; 173 + ret = readx_poll_timeout_atomic(dvfsrc_get_current_opp, dvfsrc, curr, 174 + curr->dram_opp >= target->dram_opp && 175 + curr->vcore_opp >= target->vcore_opp, 176 + STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US); 177 + if (ret < 0) { 178 + dev_warn(dvfsrc->dev, 179 + "timeout! 
target OPP: %u, dram: %d\n", level, curr->dram_opp); 180 + return ret; 181 + } 182 + 183 + return 0; 184 + } 185 + 186 + static u32 dvfsrc_get_target_level_v1(struct mtk_dvfsrc *dvfsrc) 187 + { 188 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL); 189 + 190 + return FIELD_GET(DVFSRC_V1_LEVEL_TARGET_LEVEL, val); 191 + } 192 + 193 + static u32 dvfsrc_get_current_level_v1(struct mtk_dvfsrc *dvfsrc) 194 + { 195 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL); 196 + u32 current_level = FIELD_GET(DVFSRC_V1_LEVEL_CURRENT_LEVEL, val); 197 + 198 + return ffs(current_level) - 1; 199 + } 200 + 201 + static u32 dvfsrc_get_target_level_v2(struct mtk_dvfsrc *dvfsrc) 202 + { 203 + return dvfsrc_readl(dvfsrc, DVFSRC_TARGET_LEVEL); 204 + } 205 + 206 + static u32 dvfsrc_get_current_level_v2(struct mtk_dvfsrc *dvfsrc) 207 + { 208 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_LEVEL); 209 + u32 level = ffs(val); 210 + 211 + /* Valid levels */ 212 + if (level < dvfsrc->curr_opps->num_opp) 213 + return dvfsrc->curr_opps->num_opp - level; 214 + 215 + /* Zero for level 0 or invalid level */ 216 + return 0; 217 + } 218 + 219 + static u32 dvfsrc_get_vcore_level_v1(struct mtk_dvfsrc *dvfsrc) 220 + { 221 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ2); 222 + 223 + return FIELD_GET(DVFSRC_V1_SW_REQ2_VCORE_LEVEL, val); 224 + } 225 + 226 + static void dvfsrc_set_vcore_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level) 227 + { 228 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ2); 229 + 230 + val &= ~DVFSRC_V1_SW_REQ2_VCORE_LEVEL; 231 + val |= FIELD_PREP(DVFSRC_V1_SW_REQ2_VCORE_LEVEL, level); 232 + 233 + dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ2, val); 234 + } 235 + 236 + static u32 dvfsrc_get_vcore_level_v2(struct mtk_dvfsrc *dvfsrc) 237 + { 238 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ); 239 + 240 + return FIELD_GET(DVFSRC_V2_SW_REQ_VCORE_LEVEL, val); 241 + } 242 + 243 + static void dvfsrc_set_vcore_level_v2(struct mtk_dvfsrc *dvfsrc, u32 level) 244 + { 245 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_SW_REQ); 
246 + 247 + val &= ~DVFSRC_V2_SW_REQ_VCORE_LEVEL; 248 + val |= FIELD_PREP(DVFSRC_V2_SW_REQ_VCORE_LEVEL, level); 249 + 250 + dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ, val); 251 + } 252 + 253 + static u32 dvfsrc_get_vscp_level_v2(struct mtk_dvfsrc *dvfsrc) 254 + { 255 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_VCORE); 256 + 257 + return FIELD_GET(DVFSRC_V2_VCORE_REQ_VSCP_LEVEL, val); 258 + } 259 + 260 + static void dvfsrc_set_vscp_level_v2(struct mtk_dvfsrc *dvfsrc, u32 level) 261 + { 262 + u32 val = dvfsrc_readl(dvfsrc, DVFSRC_VCORE); 263 + 264 + val &= ~DVFSRC_V2_VCORE_REQ_VSCP_LEVEL; 265 + val |= FIELD_PREP(DVFSRC_V2_VCORE_REQ_VSCP_LEVEL, level); 266 + 267 + dvfsrc_writel(dvfsrc, DVFSRC_VCORE, val); 268 + } 269 + 270 + static void __dvfsrc_set_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, u32 reg, 271 + u16 max_bw, u16 min_bw, u64 bw) 272 + { 273 + u32 new_bw = (u32)div_u64(bw, 100 * 1000); 274 + 275 + /* If bw constraints (in mbps) are defined make sure to respect them */ 276 + if (max_bw) 277 + new_bw = min(new_bw, max_bw); 278 + if (min_bw && new_bw > 0) 279 + new_bw = max(new_bw, min_bw); 280 + 281 + dvfsrc_writel(dvfsrc, reg, new_bw); 282 + } 283 + 284 + static void dvfsrc_set_dram_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw) 285 + { 286 + u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_nom_bw; 287 + 288 + __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_BW, max_bw, 0, bw); 289 + }; 290 + 291 + static void dvfsrc_set_dram_peak_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw) 292 + { 293 + u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_peak_bw; 294 + 295 + __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_PEAK_BW, max_bw, 0, bw); 296 + } 297 + 298 + static void dvfsrc_set_dram_hrt_bw_v1(struct mtk_dvfsrc *dvfsrc, u64 bw) 299 + { 300 + u64 max_bw = dvfsrc->dvd->bw_constraints->max_dram_hrt_bw; 301 + 302 + __dvfsrc_set_dram_bw_v1(dvfsrc, DVFSRC_SW_HRT_BW, max_bw, 0, bw); 303 + } 304 + 305 + static void dvfsrc_set_opp_level_v1(struct mtk_dvfsrc *dvfsrc, u32 level) 306 + { 307 + const struct 
dvfsrc_opp *opp = &dvfsrc->curr_opps->opps[level]; 308 + u32 val; 309 + 310 + /* Translate Pstate to DVFSRC level and set it to DVFSRC HW */ 311 + val = FIELD_PREP(DVFSRC_V1_SW_REQ2_DRAM_LEVEL, opp->dram_opp); 312 + val |= FIELD_PREP(DVFSRC_V1_SW_REQ2_VCORE_LEVEL, opp->vcore_opp); 313 + 314 + dev_dbg(dvfsrc->dev, "vcore_opp: %d, dram_opp: %d\n", opp->vcore_opp, opp->dram_opp); 315 + dvfsrc_writel(dvfsrc, DVFSRC_SW_REQ, val); 316 + } 317 + 318 + int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data) 319 + { 320 + struct mtk_dvfsrc *dvfsrc = dev_get_drvdata(dev); 321 + bool state; 322 + int ret; 323 + 324 + dev_dbg(dvfsrc->dev, "cmd: %d, data: %llu\n", cmd, data); 325 + 326 + switch (cmd) { 327 + case MTK_DVFSRC_CMD_BW: 328 + dvfsrc->dvd->set_dram_bw(dvfsrc, data); 329 + return 0; 330 + case MTK_DVFSRC_CMD_HRT_BW: 331 + if (dvfsrc->dvd->set_dram_hrt_bw) 332 + dvfsrc->dvd->set_dram_hrt_bw(dvfsrc, data); 333 + return 0; 334 + case MTK_DVFSRC_CMD_PEAK_BW: 335 + if (dvfsrc->dvd->set_dram_peak_bw) 336 + dvfsrc->dvd->set_dram_peak_bw(dvfsrc, data); 337 + return 0; 338 + case MTK_DVFSRC_CMD_OPP: 339 + if (!dvfsrc->dvd->set_opp_level) 340 + return 0; 341 + 342 + dvfsrc->dvd->set_opp_level(dvfsrc, data); 343 + break; 344 + case MTK_DVFSRC_CMD_VCORE_LEVEL: 345 + dvfsrc->dvd->set_vcore_level(dvfsrc, data); 346 + break; 347 + case MTK_DVFSRC_CMD_VSCP_LEVEL: 348 + if (!dvfsrc->dvd->set_vscp_level) 349 + return 0; 350 + 351 + dvfsrc->dvd->set_vscp_level(dvfsrc, data); 352 + break; 353 + default: 354 + dev_err(dvfsrc->dev, "unknown command: %d\n", cmd); 355 + return -EOPNOTSUPP; 356 + } 357 + 358 + /* DVFSRC needs at least 2T(~196ns) to handle a request */ 359 + udelay(STARTUP_TIME_US); 360 + 361 + ret = readx_poll_timeout_atomic(dvfsrc_is_idle, dvfsrc, state, state, 362 + STARTUP_TIME_US, DVFSRC_POLL_TIMEOUT_US); 363 + if (ret < 0) { 364 + dev_warn(dvfsrc->dev, 365 + "%d: idle timeout, data: %llu, last: %d -> %d\n", cmd, data, 366 + 
dvfsrc->dvd->get_current_level(dvfsrc), 367 + dvfsrc->dvd->get_target_level(dvfsrc)); 368 + return ret; 369 + } 370 + 371 + if (cmd == MTK_DVFSRC_CMD_OPP) 372 + ret = dvfsrc->dvd->wait_for_opp_level(dvfsrc, data); 373 + else 374 + ret = dvfsrc->dvd->wait_for_vcore_level(dvfsrc, data); 375 + 376 + if (ret < 0) { 377 + dev_warn(dvfsrc->dev, 378 + "%d: wait timeout, data: %llu, last: %d -> %d\n", 379 + cmd, data, 380 + dvfsrc->dvd->get_current_level(dvfsrc), 381 + dvfsrc->dvd->get_target_level(dvfsrc)); 382 + return ret; 383 + } 384 + 385 + return 0; 386 + } 387 + EXPORT_SYMBOL(mtk_dvfsrc_send_request); 388 + 389 + int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data) 390 + { 391 + struct mtk_dvfsrc *dvfsrc = dev_get_drvdata(dev); 392 + 393 + switch (cmd) { 394 + case MTK_DVFSRC_CMD_VCORE_LEVEL: 395 + *data = dvfsrc->dvd->get_vcore_level(dvfsrc); 396 + break; 397 + case MTK_DVFSRC_CMD_VSCP_LEVEL: 398 + *data = dvfsrc->dvd->get_vscp_level(dvfsrc); 399 + break; 400 + default: 401 + return -EOPNOTSUPP; 402 + } 403 + 404 + return 0; 405 + } 406 + EXPORT_SYMBOL(mtk_dvfsrc_query_info); 407 + 408 + static int mtk_dvfsrc_probe(struct platform_device *pdev) 409 + { 410 + struct arm_smccc_res ares; 411 + struct mtk_dvfsrc *dvfsrc; 412 + int ret; 413 + 414 + dvfsrc = devm_kzalloc(&pdev->dev, sizeof(*dvfsrc), GFP_KERNEL); 415 + if (!dvfsrc) 416 + return -ENOMEM; 417 + 418 + dvfsrc->dvd = of_device_get_match_data(&pdev->dev); 419 + dvfsrc->dev = &pdev->dev; 420 + 421 + dvfsrc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); 422 + if (IS_ERR(dvfsrc->regs)) 423 + return PTR_ERR(dvfsrc->regs); 424 + 425 + arm_smccc_smc(MTK_SIP_DVFSRC_VCOREFS_CONTROL, MTK_SIP_DVFSRC_INIT, 426 + 0, 0, 0, 0, 0, 0, &ares); 427 + if (ares.a0) 428 + return dev_err_probe(&pdev->dev, -EINVAL, "DVFSRC init failed: %lu\n", ares.a0); 429 + 430 + dvfsrc->dram_type = ares.a1; 431 + dev_dbg(&pdev->dev, "DRAM Type: %d\n", dvfsrc->dram_type); 432 + 433 + dvfsrc->curr_opps = 
&dvfsrc->dvd->opps_desc[dvfsrc->dram_type]; 434 + platform_set_drvdata(pdev, dvfsrc); 435 + 436 + ret = devm_of_platform_populate(&pdev->dev); 437 + if (ret) 438 + return dev_err_probe(&pdev->dev, ret, "Failed to populate child devices\n"); 439 + 440 + /* Everything is set up - make it run! */ 441 + arm_smccc_smc(MTK_SIP_DVFSRC_VCOREFS_CONTROL, MTK_SIP_DVFSRC_START, 442 + 0, 0, 0, 0, 0, 0, &ares); 443 + if (ares.a0) 444 + return dev_err_probe(&pdev->dev, -EINVAL, "Cannot start DVFSRC: %lu\n", ares.a0); 445 + 446 + return 0; 447 + } 448 + 449 + static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp4[] = { 450 + { 0, 0 }, { 0, 1 }, { 0, 2 }, { 1, 2 }, 451 + }; 452 + 453 + static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp3[] = { 454 + { 0, 0 }, { 0, 1 }, { 1, 1 }, { 1, 2 }, 455 + }; 456 + 457 + static const struct dvfsrc_opp_desc dvfsrc_opp_mt8183_desc[] = { 458 + [0] = { 459 + .opps = dvfsrc_opp_mt8183_lp4, 460 + .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8183_lp4), 461 + }, 462 + [1] = { 463 + .opps = dvfsrc_opp_mt8183_lp3, 464 + .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8183_lp3), 465 + }, 466 + [2] = { 467 + .opps = dvfsrc_opp_mt8183_lp3, 468 + .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8183_lp3), 469 + } 470 + }; 471 + 472 + static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8183 = { 0, 0, 0 }; 473 + 474 + static const struct dvfsrc_soc_data mt8183_data = { 475 + .opps_desc = dvfsrc_opp_mt8183_desc, 476 + .regs = dvfsrc_mt8183_regs, 477 + .get_target_level = dvfsrc_get_target_level_v1, 478 + .get_current_level = dvfsrc_get_current_level_v1, 479 + .get_vcore_level = dvfsrc_get_vcore_level_v1, 480 + .set_dram_bw = dvfsrc_set_dram_bw_v1, 481 + .set_opp_level = dvfsrc_set_opp_level_v1, 482 + .set_vcore_level = dvfsrc_set_vcore_level_v1, 483 + .wait_for_opp_level = dvfsrc_wait_for_opp_level_v1, 484 + .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, 485 + .bw_constraints = &dvfsrc_bw_constr_mt8183, 486 + }; 487 + 488 + static const struct dvfsrc_opp dvfsrc_opp_mt8195_lp4[] = 
{ 489 + { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 }, 490 + { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 }, 491 + { 0, 2 }, { 1, 2 }, { 2, 2 }, { 3, 2 }, 492 + { 1, 3 }, { 2, 3 }, { 3, 3 }, { 1, 4 }, 493 + { 2, 4 }, { 3, 4 }, { 2, 5 }, { 3, 5 }, 494 + { 3, 6 }, 495 + }; 496 + 497 + static const struct dvfsrc_opp_desc dvfsrc_opp_mt8195_desc[] = { 498 + [0] = { 499 + .opps = dvfsrc_opp_mt8195_lp4, 500 + .num_opp = ARRAY_SIZE(dvfsrc_opp_mt8195_lp4), 501 + } 502 + }; 503 + 504 + static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8195 = { 505 + .max_dram_nom_bw = 255, 506 + .max_dram_peak_bw = 255, 507 + .max_dram_hrt_bw = 1023, 508 + }; 509 + 510 + static const struct dvfsrc_soc_data mt8195_data = { 511 + .opps_desc = dvfsrc_opp_mt8195_desc, 512 + .regs = dvfsrc_mt8195_regs, 513 + .get_target_level = dvfsrc_get_target_level_v2, 514 + .get_current_level = dvfsrc_get_current_level_v2, 515 + .get_vcore_level = dvfsrc_get_vcore_level_v2, 516 + .get_vscp_level = dvfsrc_get_vscp_level_v2, 517 + .set_dram_bw = dvfsrc_set_dram_bw_v1, 518 + .set_dram_peak_bw = dvfsrc_set_dram_peak_bw_v1, 519 + .set_dram_hrt_bw = dvfsrc_set_dram_hrt_bw_v1, 520 + .set_vcore_level = dvfsrc_set_vcore_level_v2, 521 + .set_vscp_level = dvfsrc_set_vscp_level_v2, 522 + .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2, 523 + .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, 524 + .bw_constraints = &dvfsrc_bw_constr_mt8195, 525 + }; 526 + 527 + static const struct of_device_id mtk_dvfsrc_of_match[] = { 528 + { .compatible = "mediatek,mt8183-dvfsrc", .data = &mt8183_data }, 529 + { .compatible = "mediatek,mt8195-dvfsrc", .data = &mt8195_data }, 530 + { /* sentinel */ } 531 + }; 532 + 533 + static struct platform_driver mtk_dvfsrc_driver = { 534 + .probe = mtk_dvfsrc_probe, 535 + .driver = { 536 + .name = "mtk-dvfsrc", 537 + .of_match_table = mtk_dvfsrc_of_match, 538 + }, 539 + }; 540 + module_platform_driver(mtk_dvfsrc_driver); 541 + 542 + MODULE_AUTHOR("AngeloGioacchino Del Regno 
<angelogioacchino.delregno@collabora.com>"); 543 + MODULE_AUTHOR("Dawei Chien <dawei.chien@mediatek.com>"); 544 + MODULE_LICENSE("GPL"); 545 + MODULE_DESCRIPTION("MediaTek DVFSRC driver");
+1
drivers/soc/mediatek/mtk-regulator-coupler.c
··· 147 147 { 148 148 if (!of_machine_is_compatible("mediatek,mt8183") && 149 149 !of_machine_is_compatible("mediatek,mt8186") && 150 + !of_machine_is_compatible("mediatek,mt8188") && 150 151 !of_machine_is_compatible("mediatek,mt8192")) 151 152 return 0; 152 153
+1 -3
drivers/soc/mediatek/mtk-svs.c
··· 2133 2133 } 2134 2134 2135 2135 pdev = of_find_device_by_node(np); 2136 + of_node_put(np); 2136 2137 if (!pdev) { 2137 - of_node_put(np); 2138 2138 dev_err(svsp->dev, "cannot find pdev by %s\n", node_name); 2139 2139 return ERR_PTR(-ENXIO); 2140 2140 } 2141 - 2142 - of_node_put(np); 2143 2141 2144 2142 return &pdev->dev; 2145 2143 }
+36
include/linux/soc/mediatek/dvfsrc.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * Copyright (c) 2021 MediaTek Inc. 4 + * Copyright (c) 2024 Collabora Ltd. 5 + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> 6 + */ 7 + 8 + #ifndef __MEDIATEK_DVFSRC_H 9 + #define __MEDIATEK_DVFSRC_H 10 + 11 + enum mtk_dvfsrc_cmd { 12 + MTK_DVFSRC_CMD_BW, 13 + MTK_DVFSRC_CMD_HRT_BW, 14 + MTK_DVFSRC_CMD_PEAK_BW, 15 + MTK_DVFSRC_CMD_OPP, 16 + MTK_DVFSRC_CMD_VCORE_LEVEL, 17 + MTK_DVFSRC_CMD_VSCP_LEVEL, 18 + MTK_DVFSRC_CMD_MAX, 19 + }; 20 + 21 + #if IS_ENABLED(CONFIG_MTK_DVFSRC) 22 + 23 + int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data); 24 + int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data); 25 + 26 + #else 27 + 28 + static inline int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data) 29 + { return -ENODEV; } 30 + 31 + static inline int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data) 32 + { return -ENODEV; } 33 + 34 + #endif /* CONFIG_MTK_DVFSRC */ 35 + 36 + #endif
+3
include/linux/soc/mediatek/mtk_sip_svc.h
··· 22 22 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \ 23 23 ARM_SMCCC_OWNER_SIP, fn_id) 24 24 25 + /* DVFSRC SMC calls */ 26 + #define MTK_SIP_DVFSRC_VCOREFS_CONTROL MTK_SIP_SMC_CMD(0x506) 27 + 25 28 /* IOMMU related SMC call */ 26 29 #define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514) 27 30