Merge branches 'powercap' and 'pm-devfreq'

* powercap:
powercap / RAPL: mark rapl_ids array as __initconst
powercap / RAPL: add ID for Broadwell server

* pm-devfreq:
PM / devfreq: tegra: Register governor on module init
PM / devfreq: tegra: Enable interrupts after resuming the devfreq monitor
PM / devfreq: tegra: Set drvdata before enabling the irq
PM / devfreq: tegra: remove operating-points
PM / devfreq: tegra: Use clock rate constraints
PM / devfreq: tegra: Update to v5 of the submitted patches
PM / devfreq: correct misleading comment
PM / devfreq: event: Add const keyword for devfreq_event_ops structure

+296 -229
+0 -1
drivers/devfreq/devfreq.c
···
392    /**
393     * _remove_devfreq() - Remove devfreq from the list and release its resources.
394     * @devfreq:	the devfreq struct
395 -   * @skip:	skip calling device_unregister().
396     */
397    static void _remove_devfreq(struct devfreq *devfreq)
398    {

···
392    /**
393     * _remove_devfreq() - Remove devfreq from the list and release its resources.
394     * @devfreq:	the devfreq struct
395     */
396    static void _remove_devfreq(struct devfreq *devfreq)
397    {
+1 -1
drivers/devfreq/event/exynos-ppmu.c
···
194        return 0;
195    }
196
197 -  static struct devfreq_event_ops exynos_ppmu_ops = {
198        .disable = exynos_ppmu_disable,
199        .set_event = exynos_ppmu_set_event,
200        .get_event = exynos_ppmu_get_event,

···
194        return 0;
195    }
196
197 +  static const struct devfreq_event_ops exynos_ppmu_ops = {
198        .disable = exynos_ppmu_disable,
199        .set_event = exynos_ppmu_set_event,
200        .get_event = exynos_ppmu_get_event,
+292 -225
drivers/devfreq/tegra-devfreq.c
···
 62    #define ACTMON_BELOW_WMARK_WINDOW		3
 63    #define ACTMON_BOOST_FREQ_STEP		16000
 64
 65 -  /* activity counter is incremented every 256 memory transactions, and each
 66     * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
 67     * 4 * 256 = 1024.
 68     */
···
 86     * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 87     * device
 88     *
 89 -   * Coefficients and thresholds are in %
 90     */
 91    struct tegra_devfreq_device_config {
 92        u32 offset;
 93        u32 irq_mask;
 94
 95        unsigned int boost_up_coeff;
 96        unsigned int boost_down_coeff;
 97        unsigned int boost_up_threshold;
 98        unsigned int boost_down_threshold;
 99        u32 avg_dependency_threshold;
100    };
101
···
115
116    static struct tegra_devfreq_device_config actmon_device_configs[] = {
117        {
118 -          /* MCALL */
119            .offset = 0x1c0,
120            .irq_mask = 1 << 26,
121            .boost_up_coeff = 200,
···
124            .boost_down_threshold = 40,
125        },
126        {
127 -          /* MCCPU */
128            .offset = 0x200,
129            .irq_mask = 1 << 25,
130            .boost_up_coeff = 800,
···
142     */
143    struct tegra_devfreq_device {
144        const struct tegra_devfreq_device_config *config;
145
146 -      void __iomem *regs;
147 -      u32 avg_band_freq;
148 -      u32 avg_count;
149
150 -      unsigned long target_freq;
151 -      unsigned long boost_freq;
152    };
153
154    struct tegra_devfreq {
155        struct devfreq *devfreq;
156
157 -      struct platform_device *pdev;
158        struct reset_control *reset;
159        struct clk *clock;
160        void __iomem *regs;
161 -
162 -      spinlock_t lock;
163
164        struct clk *emc_clock;
165        unsigned long max_freq;
···
188        { 250000, 100000 },
189    };
190
191    static unsigned long do_percent(unsigned long val, unsigned int pct)
192    {
193        return val * pct / 100;
194    }
195
196 -  static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
197    {
198        u32 avg = dev->avg_count;
199 -      u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;
200
201 -      writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
202 -      avg = max(avg, band);
203 -      writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
204    }
205
206    static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
···
232    {
233        u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
234
235 -      writel(do_percent(val, dev->config->boost_up_threshold),
236 -             dev->regs + ACTMON_DEV_UPPER_WMARK);
237
238 -      writel(do_percent(val, dev->config->boost_down_threshold),
239 -             dev->regs + ACTMON_DEV_LOWER_WMARK);
240    }
241
242    static void actmon_write_barrier(struct tegra_devfreq *tegra)
243    {
244        /* ensure the update has reached the ACTMON */
245        wmb();
246 -      readl(tegra->regs + ACTMON_GLB_STATUS);
247    }
248
249 -  static irqreturn_t actmon_isr(int irq, void *data)
250    {
251 -      struct tegra_devfreq *tegra = data;
252 -      struct tegra_devfreq_device *dev = NULL;
253        unsigned long flags;
254 -      u32 val;
255 -      unsigned int i;
256
257 -      val = readl(tegra->regs + ACTMON_GLB_STATUS);
258
259 -      for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
260 -          if (val & tegra->devices[i].config->irq_mask) {
261 -              dev = tegra->devices + i;
262 -              break;
263 -          }
264 -      }
265
266 -      if (!dev)
267 -          return IRQ_NONE;
268
269 -      spin_lock_irqsave(&tegra->lock, flags);
270 -
271 -      dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
272 -      tegra_devfreq_update_avg_wmark(dev);
273 -
274 -      val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
275 -      if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
276 -          val = readl(dev->regs + ACTMON_DEV_CTRL) |
277 -                ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
278 -                ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
279 -
280            /*
281             * new_boost = min(old_boost * up_coef + step, max_freq)
282             */
283            dev->boost_freq = do_percent(dev->boost_freq,
284                                         dev->config->boost_up_coeff);
285            dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
286 -          if (dev->boost_freq >= tegra->max_freq) {
287 -              dev->boost_freq = tegra->max_freq;
288 -              val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
289 -          }
290 -          writel(val, dev->regs + ACTMON_DEV_CTRL);
291 -      } else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
292 -          val = readl(dev->regs + ACTMON_DEV_CTRL) |
293 -                ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
294 -                ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
295
296            /*
297             * new_boost = old_boost * down_coef
298             * or 0 if (old_boost * down_coef < step / 2)
299             */
300            dev->boost_freq = do_percent(dev->boost_freq,
301                                         dev->config->boost_down_coeff);
302 -          if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
303                dev->boost_freq = 0;
304 -              val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
305 -          }
306 -          writel(val, dev->regs + ACTMON_DEV_CTRL);
307        }
308
309        if (dev->config->avg_dependency_threshold) {
310 -          val = readl(dev->regs + ACTMON_DEV_CTRL);
311            if (dev->avg_count >= dev->config->avg_dependency_threshold)
312 -              val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
313            else if (dev->boost_freq == 0)
314 -              val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
315 -          writel(val, dev->regs + ACTMON_DEV_CTRL);
316        }
317
318 -      writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
319
320        actmon_write_barrier(tegra);
321
322 -      spin_unlock_irqrestore(&tegra->lock, flags);
323
324 -      return IRQ_WAKE_THREAD;
325    }
326
327    static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
···
355            static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
356        }
357
358 -      spin_lock_irqsave(&tegra->lock, flags);
359
360        dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
361        avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
···
365        if (dev->avg_count >= dev->config->avg_dependency_threshold)
366            dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
367
368 -      spin_unlock_irqrestore(&tegra->lock, flags);
369    }
370
371    static irqreturn_t actmon_thread_isr(int irq, void *data)
···
383                                           unsigned long action, void *ptr)
384    {
385        struct clk_notifier_data *data = ptr;
386 -      struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
387 -                                                 rate_change_nb);
388        unsigned int i;
389        unsigned long flags;
390
391 -      spin_lock_irqsave(&tegra->lock, flags);
392
393 -      switch (action) {
394 -      case POST_RATE_CHANGE:
395 -          tegra->cur_freq = data->new_rate / KHZ;
396
397 -          for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
398 -              tegra_devfreq_update_wmark(tegra, tegra->devices + i);
399
400 -          actmon_write_barrier(tegra);
401 -          break;
402 -      case PRE_RATE_CHANGE:
403 -          /* fall through */
404 -      case ABORT_RATE_CHANGE:
405 -          break;
406 -      };
407
408 -      spin_unlock_irqrestore(&tegra->lock, flags);
409
410        return NOTIFY_OK;
411    }
412
413    static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
414                                              struct tegra_devfreq_device *dev)
415    {
416 -      u32 val;
417
418 -      dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
419        dev->target_freq = tegra->cur_freq;
420
421        dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
422 -      writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);
423
424 -      tegra_devfreq_update_avg_wmark(dev);
425        tegra_devfreq_update_wmark(tegra, dev);
426
427 -      writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
428 -      writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
429
430 -      val = 0;
431 -      val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
432 -             ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
433 -             ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
434        val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
435               << ACTMON_DEV_CTRL_K_VAL_SHIFT;
436        val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
437               << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
438        val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
439               << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
440 -      val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
441 -             ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
442 -
443 -      writel(val, dev->regs + ACTMON_DEV_CTRL);
444 -
445 -      actmon_write_barrier(tegra);
446 -
447 -      val = readl(dev->regs + ACTMON_DEV_CTRL);
448        val |= ACTMON_DEV_CTRL_ENB;
449 -      writel(val, dev->regs + ACTMON_DEV_CTRL);
450
451        actmon_write_barrier(tegra);
452 -  }
453 -
454 -  static int tegra_devfreq_suspend(struct device *dev)
455 -  {
456 -      struct platform_device *pdev;
457 -      struct tegra_devfreq *tegra;
458 -      struct tegra_devfreq_device *actmon_dev;
459 -      unsigned int i;
460 -      u32 val;
461 -
462 -      pdev = container_of(dev, struct platform_device, dev);
463 -      tegra = platform_get_drvdata(pdev);
464 -
465 -      for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
466 -          actmon_dev = &tegra->devices[i];
467 -
468 -          val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
469 -          val &= ~ACTMON_DEV_CTRL_ENB;
470 -          writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);
471 -
472 -          writel(ACTMON_INTR_STATUS_CLEAR,
473 -                 actmon_dev->regs + ACTMON_DEV_INTR_STATUS);
474 -
475 -          actmon_write_barrier(tegra);
476 -      }
477 -
478 -      return 0;
479 -  }
480 -
481 -  static int tegra_devfreq_resume(struct device *dev)
482 -  {
483 -      struct platform_device *pdev;
484 -      struct tegra_devfreq *tegra;
485 -      struct tegra_devfreq_device *actmon_dev;
486 -      unsigned int i;
487 -
488 -      pdev = container_of(dev, struct platform_device, dev);
489 -      tegra = platform_get_drvdata(pdev);
490 -
491 -      for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
492 -          actmon_dev = &tegra->devices[i];
493 -
494 -          tegra_actmon_configure_device(tegra, actmon_dev);
495 -      }
496 -
497 -      return 0;
498    }
499
500    static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
501                                    u32 flags)
502    {
503 -      struct platform_device *pdev;
504 -      struct tegra_devfreq *tegra;
505        struct dev_pm_opp *opp;
506        unsigned long rate = *freq * KHZ;
507 -
508 -      pdev = container_of(dev, struct platform_device, dev);
509 -      tegra = platform_get_drvdata(pdev);
510
511        rcu_read_lock();
512        opp = devfreq_recommended_opp(dev, &rate, flags);
···
497        rate = dev_pm_opp_get_freq(opp);
498        rcu_read_unlock();
499
500 -      /* TODO: Once we have per-user clk constraints, set a floor */
501 -      clk_set_rate(tegra->emc_clock, rate);
502 -
503 -      /* TODO: Set voltage as well */
504
505        return 0;
506    }
···
506    static int tegra_devfreq_get_dev_status(struct device *dev,
507                                            struct devfreq_dev_status *stat)
508    {
509 -      struct platform_device *pdev;
510 -      struct tegra_devfreq *tegra;
511        struct tegra_devfreq_device *actmon_dev;
512 -
513 -      pdev = container_of(dev, struct platform_device, dev);
514 -      tegra = platform_get_drvdata(pdev);
515
516        stat->current_frequency = tegra->cur_freq;
517
···
519        actmon_dev = &tegra->devices[MCALL];
520
521        /* Number of cycles spent on memory access */
522 -      stat->busy_time = actmon_dev->avg_count;
523
524        /* The bus can be considered to be saturated way before 100% */
525        stat->busy_time *= 100 / BUS_SATURATION_RATIO;
···
527        /* Number of cycles in a sampling period */
528        stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
529
530        return 0;
531    }
532
533 -  static int tegra_devfreq_get_target(struct devfreq *devfreq,
534 -                                      unsigned long *freq)
535    {
536        struct devfreq_dev_status stat;
537        struct tegra_devfreq *tegra;
···
567        return 0;
568    }
569
570 -  static int tegra_devfreq_event_handler(struct devfreq *devfreq,
571 -                                         unsigned int event, void *data)
572    {
573 -      return 0;
574    }
575
576    static struct devfreq_governor tegra_devfreq_governor = {
577 -      .name = "tegra",
578 -      .get_target_freq = tegra_devfreq_get_target,
579 -      .event_handler = tegra_devfreq_event_handler,
580 -  };
581 -
582 -  static struct devfreq_dev_profile tegra_devfreq_profile = {
583 -      .polling_ms = 0,
584 -      .target = tegra_devfreq_target,
585 -      .get_dev_status = tegra_devfreq_get_dev_status,
586    };
587
588    static int tegra_devfreq_probe(struct platform_device *pdev)
···
611        struct tegra_devfreq *tegra;
612        struct tegra_devfreq_device *dev;
613        struct resource *res;
614 -      unsigned long max_freq;
615        unsigned int i;
616        int irq;
617        int err;
618
···
620        if (!tegra)
621            return -ENOMEM;
622
623 -      spin_lock_init(&tegra->lock);
624 -
625        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
626 -      if (!res) {
627 -          dev_err(&pdev->dev, "Failed to get regs resource\n");
628 -          return -ENODEV;
629 -      }
630
631        tegra->regs = devm_ioremap_resource(&pdev->dev, res);
632 -      if (IS_ERR(tegra->regs)) {
633 -          dev_err(&pdev->dev, "Failed to get IO memory\n");
634            return PTR_ERR(tegra->regs);
635 -      }
636
637        tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
638        if (IS_ERR(tegra->reset)) {
···
644            return PTR_ERR(tegra->emc_clock);
645        }
646
647 -      err = of_init_opp_table(&pdev->dev);
648 -      if (err) {
649 -          dev_err(&pdev->dev, "Failed to init operating point table\n");
650 -          return err;
651 -      }
652
653        tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
654        err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
···
658
659        err = clk_prepare_enable(tegra->clock);
660        if (err) {
661 -          reset_control_deassert(tegra->reset);
662            return err;
663        }
664
665        reset_control_deassert(tegra->reset);
666
667 -      max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
668 -      tegra->max_freq = max_freq / KHZ;
669 -
670 -      clk_set_rate(tegra->emc_clock, max_freq);
671 -
672        tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
673
674 -      writel(ACTMON_SAMPLING_PERIOD - 1,
675 -             tegra->regs + ACTMON_GLB_PERIOD_CTRL);
676
677        for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
678            dev = tegra->devices + i;
679            dev->config = actmon_device_configs + i;
680            dev->regs = tegra->regs + dev->config->offset;
681
682 -          tegra_actmon_configure_device(tegra, tegra->devices + i);
683        }
684
685 -      err = devfreq_add_governor(&tegra_devfreq_governor);
686 -      if (err) {
687 -          dev_err(&pdev->dev, "Failed to add governor\n");
688 -          return err;
689        }
690 -
691 -      tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
692 -      tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
693 -                                               &tegra_devfreq_profile,
694 -                                               "tegra",
695 -                                               NULL);
696
697        irq = platform_get_irq(pdev, 0);
698        err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
699                                        actmon_thread_isr, IRQF_SHARED,
700                                        "tegra-devfreq", tegra);
···
701            return err;
702        }
703
704 -      platform_set_drvdata(pdev, tegra);
705
706        return 0;
707    }
···
713    static int tegra_devfreq_remove(struct platform_device *pdev)
714    {
715        struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
716
717        clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
718
···
734        return 0;
735    }
736
737 -  static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
738 -                           tegra_devfreq_suspend,
739 -                           tegra_devfreq_resume);
740 -
741 -  static struct of_device_id tegra_devfreq_of_match[] = {
742        { .compatible = "nvidia,tegra124-actmon" },
743        { },
744    };
745
746    static struct platform_driver tegra_devfreq_driver = {
747        .probe = tegra_devfreq_probe,
748        .remove = tegra_devfreq_remove,
749        .driver = {
750 -          .name = "tegra-devfreq",
751 -          .owner = THIS_MODULE,
752            .of_match_table = tegra_devfreq_of_match,
753 -          .pm = &tegra_devfreq_pm_ops,
754        },
755    };
756 -  module_platform_driver(tegra_devfreq_driver);
757
758 -  MODULE_LICENSE("GPL");
759    MODULE_DESCRIPTION("Tegra devfreq driver");
760    MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
761 -  MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);

···
 62    #define ACTMON_BELOW_WMARK_WINDOW		3
 63    #define ACTMON_BOOST_FREQ_STEP		16000
 64
 65 +  /*
 66 +   * Activity counter is incremented every 256 memory transactions, and each
 67     * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
 68     * 4 * 256 = 1024.
 69     */
···
 85     * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 86     * device
 87     *
 88 +   * Coefficients and thresholds are percentages unless otherwise noted
 89     */
 90    struct tegra_devfreq_device_config {
 91        u32 offset;
 92        u32 irq_mask;
 93
 94 +      /* Factors applied to boost_freq every consecutive watermark breach */
 95        unsigned int boost_up_coeff;
 96        unsigned int boost_down_coeff;
 97 +
 98 +      /* Define the watermark bounds when applied to the current avg */
 99        unsigned int boost_up_threshold;
100        unsigned int boost_down_threshold;
101 +
102 +      /*
103 +       * Threshold of activity (cycles) below which the CPU frequency isn't
104 +       * to be taken into account. This is to avoid increasing the EMC
105 +       * frequency when the CPU is very busy but not accessing the bus often.
106 +       */
107        u32 avg_dependency_threshold;
108    };
109
···
105
106    static struct tegra_devfreq_device_config actmon_device_configs[] = {
107        {
108 +          /* MCALL: All memory accesses (including from the CPUs) */
109            .offset = 0x1c0,
110            .irq_mask = 1 << 26,
111            .boost_up_coeff = 200,
···
114            .boost_down_threshold = 40,
115        },
116        {
117 +          /* MCCPU: memory accesses from the CPUs */
118            .offset = 0x200,
119            .irq_mask = 1 << 25,
120            .boost_up_coeff = 800,
···
132     */
133    struct tegra_devfreq_device {
134        const struct tegra_devfreq_device_config *config;
135 +      void __iomem *regs;
136 +      spinlock_t lock;
137
138 +      /* Average event count sampled in the last interrupt */
139 +      u32 avg_count;
140
141 +      /*
142 +       * Extra frequency to increase the target by due to consecutive
143 +       * watermark breaches.
144 +       */
145 +      unsigned long boost_freq;
146 +
147 +      /* Optimal frequency calculated from the stats for this device */
148 +      unsigned long target_freq;
149    };
150
151    struct tegra_devfreq {
152        struct devfreq *devfreq;
153
154        struct reset_control *reset;
155        struct clk *clock;
156        void __iomem *regs;
157
158        struct clk *emc_clock;
159        unsigned long max_freq;
···
174        { 250000, 100000 },
175    };
176
177 +  static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
178 +  {
179 +      return readl(tegra->regs + offset);
180 +  }
181 +
182 +  static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
183 +  {
184 +      writel(val, tegra->regs + offset);
185 +  }
186 +
187 +  static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
188 +  {
189 +      return readl(dev->regs + offset);
190 +  }
191 +
192 +  static void device_writel(struct tegra_devfreq_device *dev, u32 val,
193 +                            u32 offset)
194 +  {
195 +      writel(val, dev->regs + offset);
196 +  }
197 +
198    static unsigned long do_percent(unsigned long val, unsigned int pct)
199    {
200        return val * pct / 100;
201    }
202
203 +  static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
204 +                                             struct tegra_devfreq_device *dev)
205    {
206        u32 avg = dev->avg_count;
207 +      u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
208 +      u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
209
210 +      device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
211 +
212 +      avg = max(dev->avg_count, band);
213 +      device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
214    }
215
216    static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
···
194    {
195        u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
196
197 +      device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
198 +                    ACTMON_DEV_UPPER_WMARK);
199
200 +      device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
201 +                    ACTMON_DEV_LOWER_WMARK);
202    }
203
204    static void actmon_write_barrier(struct tegra_devfreq *tegra)
205    {
206        /* ensure the update has reached the ACTMON */
207        wmb();
208 +      actmon_readl(tegra, ACTMON_GLB_STATUS);
209    }
210
211 +  static void actmon_isr_device(struct tegra_devfreq *tegra,
212 +                                struct tegra_devfreq_device *dev)
213    {
214        unsigned long flags;
215 +      u32 intr_status, dev_ctrl;
216
217 +      spin_lock_irqsave(&dev->lock, flags);
218
219 +      dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
220 +      tegra_devfreq_update_avg_wmark(tegra, dev);
221
222 +      intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
223 +      dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
224
225 +      if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
226            /*
227             * new_boost = min(old_boost * up_coef + step, max_freq)
228             */
229            dev->boost_freq = do_percent(dev->boost_freq,
230                                         dev->config->boost_up_coeff);
231            dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
232
233 +          dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
234 +
235 +          if (dev->boost_freq >= tegra->max_freq)
236 +              dev->boost_freq = tegra->max_freq;
237 +          else
238 +              dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
239 +      } else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
240            /*
241             * new_boost = old_boost * down_coef
242             * or 0 if (old_boost * down_coef < step / 2)
243             */
244            dev->boost_freq = do_percent(dev->boost_freq,
245                                         dev->config->boost_down_coeff);
246 +
247 +          dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
248 +
249 +          if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
250                dev->boost_freq = 0;
251 +          else
252 +              dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
253        }
254
255        if (dev->config->avg_dependency_threshold) {
256            if (dev->avg_count >= dev->config->avg_dependency_threshold)
257 +              dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
258            else if (dev->boost_freq == 0)
259 +              dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
260        }
261
262 +      device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
263 +
264 +      device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
265
266        actmon_write_barrier(tegra);
267
268 +      spin_unlock_irqrestore(&dev->lock, flags);
269 +  }
270
271 +  static irqreturn_t actmon_isr(int irq, void *data)
272 +  {
273 +      struct tegra_devfreq *tegra = data;
274 +      bool handled = false;
275 +      unsigned int i;
276 +      u32 val;
277 +
278 +      val = actmon_readl(tegra, ACTMON_GLB_STATUS);
279 +      for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
280 +          if (val & tegra->devices[i].config->irq_mask) {
281 +              actmon_isr_device(tegra, tegra->devices + i);
282 +              handled = true;
283 +          }
284 +      }
285 +
286 +      return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
287    }
288
289    static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
···
317            static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
318        }
319
320 +      spin_lock_irqsave(&dev->lock, flags);
321
322        dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
323        avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
···
327        if (dev->avg_count >= dev->config->avg_dependency_threshold)
328            dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
329
330 +      spin_unlock_irqrestore(&dev->lock, flags);
331    }
332
333    static irqreturn_t actmon_thread_isr(int irq, void *data)
···
345                                           unsigned long action, void *ptr)
346    {
347        struct clk_notifier_data *data = ptr;
348 +      struct tegra_devfreq *tegra;
349 +      struct tegra_devfreq_device *dev;
350        unsigned int i;
351        unsigned long flags;
352
353 +      if (action != POST_RATE_CHANGE)
354 +          return NOTIFY_OK;
355
356 +      tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
357
358 +      tegra->cur_freq = data->new_rate / KHZ;
359
360 +      for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
361 +          dev = &tegra->devices[i];
362
363 +          spin_lock_irqsave(&dev->lock, flags);
364 +          tegra_devfreq_update_wmark(tegra, dev);
365 +          spin_unlock_irqrestore(&dev->lock, flags);
366 +      }
367 +
368 +      actmon_write_barrier(tegra);
369
370        return NOTIFY_OK;
371 +  }
372 +
373 +  static void tegra_actmon_enable_interrupts(struct tegra_devfreq *tegra)
374 +  {
375 +      struct tegra_devfreq_device *dev;
376 +      u32 val;
377 +      unsigned int i;
378 +
379 +      for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
380 +          dev = &tegra->devices[i];
381 +
382 +          val = device_readl(dev, ACTMON_DEV_CTRL);
383 +          val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
384 +          val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
385 +          val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
386 +          val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
387 +
388 +          device_writel(dev, val, ACTMON_DEV_CTRL);
389 +      }
390 +
391 +      actmon_write_barrier(tegra);
392 +  }
393 +
394 +  static void tegra_actmon_disable_interrupts(struct tegra_devfreq *tegra)
395 +  {
396 +      struct tegra_devfreq_device *dev;
397 +      u32 val;
398 +      unsigned int i;
399 +
400 +      for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
401 +          dev = &tegra->devices[i];
402 +
403 +          val = device_readl(dev, ACTMON_DEV_CTRL);
404 +          val &= ~ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
405 +          val &= ~ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
406 +          val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
407 +          val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
408 +
409 +          device_writel(dev, val, ACTMON_DEV_CTRL);
410 +      }
411 +
412 +      actmon_write_barrier(tegra);
413    }
414
415    static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
416                                              struct tegra_devfreq_device *dev)
417    {
418 +      u32 val = 0;
419
420        dev->target_freq = tegra->cur_freq;
421
422        dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
423 +      device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
424
425 +      tegra_devfreq_update_avg_wmark(tegra, dev);
426        tegra_devfreq_update_wmark(tegra, dev);
427
428 +      device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
429 +      device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
430
431 +      val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
432        val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
433               << ACTMON_DEV_CTRL_K_VAL_SHIFT;
434        val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
435               << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
436        val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
437               << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
438        val |= ACTMON_DEV_CTRL_ENB;
439 +
440 +      device_writel(dev, val, ACTMON_DEV_CTRL);
441
442        actmon_write_barrier(tegra);
443    }
444
445    static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
446                                    u32 flags)
447    {
448 +      struct tegra_devfreq *tegra = dev_get_drvdata(dev);
449        struct dev_pm_opp *opp;
450        unsigned long rate = *freq * KHZ;
451
452        rcu_read_lock();
453        opp = devfreq_recommended_opp(dev, &rate, flags);
···
480        rate = dev_pm_opp_get_freq(opp);
481        rcu_read_unlock();
482
483 +      clk_set_min_rate(tegra->emc_clock, rate);
484 +      clk_set_rate(tegra->emc_clock, 0);
485
486        return 0;
487    }
···
491    static int tegra_devfreq_get_dev_status(struct device *dev,
492                                            struct devfreq_dev_status *stat)
493    {
494 +      struct tegra_devfreq *tegra = dev_get_drvdata(dev);
495        struct tegra_devfreq_device *actmon_dev;
496
497        stat->current_frequency = tegra->cur_freq;
498
···
508        actmon_dev = &tegra->devices[MCALL];
509
510        /* Number of cycles spent on memory access */
511 +      stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
512
513        /* The bus can be considered to be saturated way before 100% */
514        stat->busy_time *= 100 / BUS_SATURATION_RATIO;
···
516        /* Number of cycles in a sampling period */
517        stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
518
519 +      stat->busy_time = min(stat->busy_time, stat->total_time);
520 +
521        return 0;
522    }
523
524 +  static struct devfreq_dev_profile tegra_devfreq_profile = {
525 +      .polling_ms = 0,
526 +      .target = tegra_devfreq_target,
527 +      .get_dev_status = tegra_devfreq_get_dev_status,
528 +  };
529 +
530 +  static int tegra_governor_get_target(struct devfreq *devfreq,
531 +                                       unsigned long *freq)
532    {
533        struct devfreq_dev_status stat;
534        struct tegra_devfreq *tegra;
···
548        return 0;
549    }
550
551 +  static int tegra_governor_event_handler(struct devfreq *devfreq,
552 +                                          unsigned int event, void *data)
553    {
554 +      struct tegra_devfreq *tegra;
555 +      int ret = 0;
556 +
557 +      tegra = dev_get_drvdata(devfreq->dev.parent);
558 +
559 +      switch (event) {
560 +      case DEVFREQ_GOV_START:
561 +          devfreq_monitor_start(devfreq);
562 +          tegra_actmon_enable_interrupts(tegra);
563 +          break;
564 +
565 +      case DEVFREQ_GOV_STOP:
566 +          tegra_actmon_disable_interrupts(tegra);
567 +          devfreq_monitor_stop(devfreq);
568 +          break;
569 +
570 +      case DEVFREQ_GOV_SUSPEND:
571 +          tegra_actmon_disable_interrupts(tegra);
572 +          devfreq_monitor_suspend(devfreq);
573 +          break;
574 +
575 +      case DEVFREQ_GOV_RESUME:
576 +          devfreq_monitor_resume(devfreq);
577 +          tegra_actmon_enable_interrupts(tegra);
578 +          break;
579 +      }
580 +
581 +      return ret;
582    }
583
584    static struct devfreq_governor tegra_devfreq_governor = {
585 +      .name = "tegra_actmon",
586 +      .get_target_freq = tegra_governor_get_target,
587 +      .event_handler = tegra_governor_event_handler,
588    };
589
590    static int tegra_devfreq_probe(struct platform_device *pdev)
···
571        struct tegra_devfreq *tegra;
572        struct tegra_devfreq_device *dev;
573        struct resource *res;
574        unsigned int i;
575 +      unsigned long rate;
576        int irq;
577        int err;
578
···
580        if (!tegra)
581            return -ENOMEM;
582
583        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
584
585        tegra->regs = devm_ioremap_resource(&pdev->dev, res);
586 +      if (IS_ERR(tegra->regs))
587            return PTR_ERR(tegra->regs);
588
589        tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
590        if (IS_ERR(tegra->reset)) {
···
612            return PTR_ERR(tegra->emc_clock);
613        }
614
615 +      clk_set_rate(tegra->emc_clock, ULONG_MAX);
616
617        tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
618        err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
···
630
631        err = clk_prepare_enable(tegra->clock);
632        if (err) {
633 +          dev_err(&pdev->dev,
634 +                  "Failed to prepare and enable ACTMON clock\n");
635            return err;
636        }
637
638        reset_control_deassert(tegra->reset);
639
640 +      tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
641        tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
642
643 +      actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
644 +                    ACTMON_GLB_PERIOD_CTRL);
645
646        for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
647            dev = tegra->devices + i;
648            dev->config = actmon_device_configs + i;
649            dev->regs = tegra->regs + dev->config->offset;
650 +          spin_lock_init(&dev->lock);
651
652 +          tegra_actmon_configure_device(tegra, dev);
653        }
654
655 +      for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
656 +          rate = clk_round_rate(tegra->emc_clock, rate);
657 +          dev_pm_opp_add(&pdev->dev, rate, 0);
658        }
659
660        irq = platform_get_irq(pdev, 0);
661 +      if (irq <= 0) {
662 +          dev_err(&pdev->dev, "Failed to get IRQ\n");
663 +          return -ENODEV;
664 +      }
665 +
666 +      platform_set_drvdata(pdev, tegra);
667 +
668        err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
669                                        actmon_thread_isr, IRQF_SHARED,
670                                        "tegra-devfreq", tegra);
···
675            return err;
676        }
677
678 +      tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
679 +      tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
680 +                                               &tegra_devfreq_profile,
681 +                                               "tegra_actmon",
682 +                                               NULL);
683
684        return 0;
685    }
···
683    static int tegra_devfreq_remove(struct platform_device *pdev)
684    {
685        struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
686 +      int irq = platform_get_irq(pdev, 0);
687 +      u32 val;
688 +      unsigned int i;
689 +
690 +      for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
691 +          val = device_readl(&tegra->devices[i], ACTMON_DEV_CTRL);
692 +          val &= ~ACTMON_DEV_CTRL_ENB;
693 +          device_writel(&tegra->devices[i], val, ACTMON_DEV_CTRL);
694 +      }
695 +
696 +      actmon_write_barrier(tegra);
697 +
698 +      devm_free_irq(&pdev->dev, irq, tegra);
699
700        clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
701
···
691        return 0;
692    }
693
694 +  static const struct of_device_id tegra_devfreq_of_match[] = {
695        { .compatible = "nvidia,tegra124-actmon" },
696        { },
697    };
698 +
699 +  MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
700
701    static struct platform_driver tegra_devfreq_driver = {
702        .probe = tegra_devfreq_probe,
703        .remove = tegra_devfreq_remove,
704        .driver = {
705 +          .name = "tegra-devfreq",
706            .of_match_table = tegra_devfreq_of_match,
707        },
708    };
709
710 +  static int __init tegra_devfreq_init(void)
711 +  {
712 +      int ret = 0;
713 +
714 +      ret = devfreq_add_governor(&tegra_devfreq_governor);
715 +      if (ret) {
716 +          pr_err("%s: failed to add governor: %d\n", __func__, ret);
717 +          return ret;
718 +      }
719 +
720 +      ret = platform_driver_register(&tegra_devfreq_driver);
721 +      if (ret)
722 +          devfreq_remove_governor(&tegra_devfreq_governor);
723 +
724 +      return ret;
725 +  }
726 +  module_init(tegra_devfreq_init)
727 +
728 +  static void __exit tegra_devfreq_exit(void)
729 +  {
730 +      int ret = 0;
731 +
732 +      platform_driver_unregister(&tegra_devfreq_driver);
733 +
734 +      ret = devfreq_remove_governor(&tegra_devfreq_governor);
735 +      if (ret)
736 +          pr_err("%s: failed to remove governor: %d\n", __func__, ret);
737 +  }
738 +  module_exit(tegra_devfreq_exit)
739 +
740 +  MODULE_LICENSE("GPL v2");
741    MODULE_DESCRIPTION("Tegra devfreq driver");
742    MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
+2 -1
drivers/powercap/intel_rapl.c
···
1054        .driver_data = (kernel_ulong_t)&_ops,	\
1055        }
1056
1057 -  static const struct x86_cpu_id rapl_ids[] = {
1058        RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
1059        RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
1060        RAPL_CPU(0x37, rapl_defaults_atom),/* Valleyview */
···
1062        RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
1063        RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
1064        RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
1065        RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1066        RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
1067        RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */

···
1054        .driver_data = (kernel_ulong_t)&_ops,	\
1055        }
1056
1057 +  static const struct x86_cpu_id rapl_ids[] __initconst = {
1058        RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
1059        RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
1060        RAPL_CPU(0x37, rapl_defaults_atom),/* Valleyview */
···
1062        RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
1063        RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
1064        RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
1065 +      RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
1066        RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1067        RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
1068        RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+1 -1
include/linux/devfreq-event.h
···
 91        const char *name;
 92        void *driver_data;
 93
 94 -      struct devfreq_event_ops *ops;
 95    };
 96
 97    #if defined(CONFIG_PM_DEVFREQ_EVENT)

···
 91        const char *name;
 92        void *driver_data;
 93
 94 +      const struct devfreq_event_ops *ops;
 95    };
 96
 97    #if defined(CONFIG_PM_DEVFREQ_EVENT)