Merge branches 'powercap' and 'pm-devfreq'

* powercap:
powercap / RAPL: mark rapl_ids array as __initconst
powercap / RAPL: add ID for Broadwell server

* pm-devfreq:
PM / devfreq: tegra: Register governor on module init
PM / devfreq: tegra: Enable interrupts after resuming the devfreq monitor
PM / devfreq: tegra: Set drvdata before enabling the irq
PM / devfreq: tegra: remove operating-points
PM / devfreq: tegra: Use clock rate constraints
PM / devfreq: tegra: Update to v5 of the submitted patches
PM / devfreq: correct misleading comment
PM / devfreq: event: Add const keyword for devfreq_event_ops structure

Total: +296 -229 (5 files changed)
+0 -1
drivers/devfreq/devfreq.c
···
 /**
  * _remove_devfreq() - Remove devfreq from the list and release its resources.
  * @devfreq:	the devfreq struct
- * @skip:	skip calling device_unregister().
  */
 static void _remove_devfreq(struct devfreq *devfreq)
 {
+1 -1
drivers/devfreq/event/exynos-ppmu.c
···
        return 0;
 }
 
-static struct devfreq_event_ops exynos_ppmu_ops = {
+static const struct devfreq_event_ops exynos_ppmu_ops = {
        .disable = exynos_ppmu_disable,
        .set_event = exynos_ppmu_set_event,
        .get_event = exynos_ppmu_get_event,
+292 -225
drivers/devfreq/tegra-devfreq.c
···
 #define ACTMON_BELOW_WMARK_WINDOW 3
 #define ACTMON_BOOST_FREQ_STEP 16000
 
-/* activity counter is incremented every 256 memory transactions, and each
+/*
+ * Activity counter is incremented every 256 memory transactions, and each
  * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
  * 4 * 256 = 1024.
  */
···
  * struct tegra_devfreq_device_config - configuration specific to an ACTMON
  * device
  *
- * Coefficients and thresholds are in %
+ * Coefficients and thresholds are percentages unless otherwise noted
  */
 struct tegra_devfreq_device_config {
        u32 offset;
        u32 irq_mask;
 
+       /* Factors applied to boost_freq every consecutive watermark breach */
        unsigned int boost_up_coeff;
        unsigned int boost_down_coeff;
+
+       /* Define the watermark bounds when applied to the current avg */
        unsigned int boost_up_threshold;
        unsigned int boost_down_threshold;
+
+       /*
+        * Threshold of activity (cycles) below which the CPU frequency isn't
+        * to be taken into account. This is to avoid increasing the EMC
+        * frequency when the CPU is very busy but not accessing the bus often.
+        */
        u32 avg_dependency_threshold;
 };
···
 
 static struct tegra_devfreq_device_config actmon_device_configs[] = {
        {
-               /* MCALL */
+               /* MCALL: All memory accesses (including from the CPUs) */
                .offset = 0x1c0,
                .irq_mask = 1 << 26,
                .boost_up_coeff = 200,
···
                .boost_down_threshold = 40,
        },
        {
-               /* MCCPU */
+               /* MCCPU: memory accesses from the CPUs */
                .offset = 0x200,
                .irq_mask = 1 << 25,
                .boost_up_coeff = 800,
···
  */
 struct tegra_devfreq_device {
        const struct tegra_devfreq_device_config *config;
+       void __iomem *regs;
+       spinlock_t lock;
 
-       void __iomem *regs;
-       u32 avg_band_freq;
-       u32 avg_count;
+       /* Average event count sampled in the last interrupt */
+       u32 avg_count;
 
-       unsigned long target_freq;
-       unsigned long boost_freq;
+       /*
+        * Extra frequency to increase the target by due to consecutive
+        * watermark breaches.
+        */
+       unsigned long boost_freq;
+
+       /* Optimal frequency calculated from the stats for this device */
+       unsigned long target_freq;
 };
 
 struct tegra_devfreq {
        struct devfreq *devfreq;
 
-       struct platform_device *pdev;
        struct reset_control *reset;
        struct clk *clock;
        void __iomem *regs;
-
-       spinlock_t lock;
 
        struct clk *emc_clock;
        unsigned long max_freq;
···
        { 250000, 100000 },
 };
 
+static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
+{
+       return readl(tegra->regs + offset);
+}
+
+static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
+{
+       writel(val, tegra->regs + offset);
+}
+
+static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
+{
+       return readl(dev->regs + offset);
+}
+
+static void device_writel(struct tegra_devfreq_device *dev, u32 val,
+                         u32 offset)
+{
+       writel(val, dev->regs + offset);
+}
+
 static unsigned long do_percent(unsigned long val, unsigned int pct)
 {
        return val * pct / 100;
 }
 
-static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
+static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
+                                          struct tegra_devfreq_device *dev)
 {
        u32 avg = dev->avg_count;
-       u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;
+       u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
+       u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
 
-       writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
-       avg = max(avg, band);
-       writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
+       device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
+
+       avg = max(dev->avg_count, band);
+       device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
 }
 
 static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
···
 {
        u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
 
-       writel(do_percent(val, dev->config->boost_up_threshold),
-              dev->regs + ACTMON_DEV_UPPER_WMARK);
+       device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
+                     ACTMON_DEV_UPPER_WMARK);
 
-       writel(do_percent(val, dev->config->boost_down_threshold),
-              dev->regs + ACTMON_DEV_LOWER_WMARK);
+       device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
+                     ACTMON_DEV_LOWER_WMARK);
 }
 
 static void actmon_write_barrier(struct tegra_devfreq *tegra)
 {
        /* ensure the update has reached the ACTMON */
        wmb();
-       readl(tegra->regs + ACTMON_GLB_STATUS);
+       actmon_readl(tegra, ACTMON_GLB_STATUS);
 }
 
-static irqreturn_t actmon_isr(int irq, void *data)
+static void actmon_isr_device(struct tegra_devfreq *tegra,
+                             struct tegra_devfreq_device *dev)
 {
-       struct tegra_devfreq *tegra = data;
-       struct tegra_devfreq_device *dev = NULL;
        unsigned long flags;
-       u32 val;
-       unsigned int i;
+       u32 intr_status, dev_ctrl;
 
-       val = readl(tegra->regs + ACTMON_GLB_STATUS);
+       spin_lock_irqsave(&dev->lock, flags);
 
-       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
-               if (val & tegra->devices[i].config->irq_mask) {
-                       dev = tegra->devices + i;
-                       break;
-               }
-       }
+       dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
+       tegra_devfreq_update_avg_wmark(tegra, dev);
 
-       if (!dev)
-               return IRQ_NONE;
+       intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
+       dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
 
-       spin_lock_irqsave(&tegra->lock, flags);
-
-       dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
-       tegra_devfreq_update_avg_wmark(dev);
-
-       val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
-       if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
-               val = readl(dev->regs + ACTMON_DEV_CTRL) |
-                       ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
-                       ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
-
+       if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
                /*
                 * new_boost = min(old_boost * up_coef + step, max_freq)
                 */
                dev->boost_freq = do_percent(dev->boost_freq,
                                             dev->config->boost_up_coeff);
                dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
-               if (dev->boost_freq >= tegra->max_freq) {
-                       dev->boost_freq = tegra->max_freq;
-                       val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
-               }
-               writel(val, dev->regs + ACTMON_DEV_CTRL);
-       } else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
-               val = readl(dev->regs + ACTMON_DEV_CTRL) |
-                       ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
-                       ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
 
+               dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+
+               if (dev->boost_freq >= tegra->max_freq)
+                       dev->boost_freq = tegra->max_freq;
+               else
+                       dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+       } else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
                /*
                 * new_boost = old_boost * down_coef
                 * or 0 if (old_boost * down_coef < step / 2)
                 */
                dev->boost_freq = do_percent(dev->boost_freq,
                                             dev->config->boost_down_coeff);
-               if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
+
+               dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+               if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
                        dev->boost_freq = 0;
-                       val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
-               }
-               writel(val, dev->regs + ACTMON_DEV_CTRL);
+               else
+                       dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
        }
 
        if (dev->config->avg_dependency_threshold) {
-               val = readl(dev->regs + ACTMON_DEV_CTRL);
                if (dev->avg_count >= dev->config->avg_dependency_threshold)
-                       val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+                       dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
                else if (dev->boost_freq == 0)
-                       val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
-               writel(val, dev->regs + ACTMON_DEV_CTRL);
+                       dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
        }
 
-       writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
+       device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
+
+       device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
 
        actmon_write_barrier(tegra);
 
-       spin_unlock_irqrestore(&tegra->lock, flags);
+       spin_unlock_irqrestore(&dev->lock, flags);
+}
 
-       return IRQ_WAKE_THREAD;
+static irqreturn_t actmon_isr(int irq, void *data)
+{
+       struct tegra_devfreq *tegra = data;
+       bool handled = false;
+       unsigned int i;
+       u32 val;
+
+       val = actmon_readl(tegra, ACTMON_GLB_STATUS);
+       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+               if (val & tegra->devices[i].config->irq_mask) {
+                       actmon_isr_device(tegra, tegra->devices + i);
+                       handled = true;
+               }
+       }
+
+       return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
 }
 
 static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
···
                static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
        }
 
-       spin_lock_irqsave(&tegra->lock, flags);
+       spin_lock_irqsave(&dev->lock, flags);
 
        dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
        avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
···
        if (dev->avg_count >= dev->config->avg_dependency_threshold)
                dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
 
-       spin_unlock_irqrestore(&tegra->lock, flags);
+       spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static irqreturn_t actmon_thread_isr(int irq, void *data)
···
                                       unsigned long action, void *ptr)
 {
        struct clk_notifier_data *data = ptr;
-       struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
-                                                  rate_change_nb);
+       struct tegra_devfreq *tegra;
+       struct tegra_devfreq_device *dev;
        unsigned int i;
        unsigned long flags;
 
-       spin_lock_irqsave(&tegra->lock, flags);
+       if (action != POST_RATE_CHANGE)
+               return NOTIFY_OK;
 
-       switch (action) {
-       case POST_RATE_CHANGE:
-               tegra->cur_freq = data->new_rate / KHZ;
+       tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
 
-               for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
-                       tegra_devfreq_update_wmark(tegra, tegra->devices + i);
+       tegra->cur_freq = data->new_rate / KHZ;
 
-               actmon_write_barrier(tegra);
-               break;
-       case PRE_RATE_CHANGE:
-               /* fall through */
-       case ABORT_RATE_CHANGE:
-               break;
-       };
+       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+               dev = &tegra->devices[i];
 
-       spin_unlock_irqrestore(&tegra->lock, flags);
+               spin_lock_irqsave(&dev->lock, flags);
+               tegra_devfreq_update_wmark(tegra, dev);
+               spin_unlock_irqrestore(&dev->lock, flags);
+       }
+
+       actmon_write_barrier(tegra);
 
        return NOTIFY_OK;
+}
+
+static void tegra_actmon_enable_interrupts(struct tegra_devfreq *tegra)
+{
+       struct tegra_devfreq_device *dev;
+       u32 val;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+               dev = &tegra->devices[i];
+
+               val = device_readl(dev, ACTMON_DEV_CTRL);
+               val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+               val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+               val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+               val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+               device_writel(dev, val, ACTMON_DEV_CTRL);
+       }
+
+       actmon_write_barrier(tegra);
+}
+
+static void tegra_actmon_disable_interrupts(struct tegra_devfreq *tegra)
+{
+       struct tegra_devfreq_device *dev;
+       u32 val;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+               dev = &tegra->devices[i];
+
+               val = device_readl(dev, ACTMON_DEV_CTRL);
+               val &= ~ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
+               val &= ~ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+               val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+               val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+
+               device_writel(dev, val, ACTMON_DEV_CTRL);
+       }
+
+       actmon_write_barrier(tegra);
 }
 
 static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
                                          struct tegra_devfreq_device *dev)
 {
-       u32 val;
+       u32 val = 0;
 
-       dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
        dev->target_freq = tegra->cur_freq;
 
        dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
-       writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);
+       device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
 
-       tegra_devfreq_update_avg_wmark(dev);
+       tegra_devfreq_update_avg_wmark(tegra, dev);
        tegra_devfreq_update_wmark(tegra, dev);
 
-       writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
-       writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
+       device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
+       device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
 
-       val = 0;
-       val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
-               ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
-               ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
+       val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
        val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
                << ACTMON_DEV_CTRL_K_VAL_SHIFT;
        val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
                << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
        val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
                << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
-       val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
-               ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
-
-       writel(val, dev->regs + ACTMON_DEV_CTRL);
-
-       actmon_write_barrier(tegra);
-
-       val = readl(dev->regs + ACTMON_DEV_CTRL);
        val |= ACTMON_DEV_CTRL_ENB;
-       writel(val, dev->regs + ACTMON_DEV_CTRL);
+
+       device_writel(dev, val, ACTMON_DEV_CTRL);
 
        actmon_write_barrier(tegra);
-}
-
-static int tegra_devfreq_suspend(struct device *dev)
-{
-       struct platform_device *pdev;
-       struct tegra_devfreq *tegra;
-       struct tegra_devfreq_device *actmon_dev;
-       unsigned int i;
-       u32 val;
-
-       pdev = container_of(dev, struct platform_device, dev);
-       tegra = platform_get_drvdata(pdev);
-
-       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
-               actmon_dev = &tegra->devices[i];
-
-               val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
-               val &= ~ACTMON_DEV_CTRL_ENB;
-               writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);
-
-               writel(ACTMON_INTR_STATUS_CLEAR,
-                      actmon_dev->regs + ACTMON_DEV_INTR_STATUS);
-
-               actmon_write_barrier(tegra);
-       }
-
-       return 0;
-}
-
-static int tegra_devfreq_resume(struct device *dev)
-{
-       struct platform_device *pdev;
-       struct tegra_devfreq *tegra;
-       struct tegra_devfreq_device *actmon_dev;
-       unsigned int i;
-
-       pdev = container_of(dev, struct platform_device, dev);
-       tegra = platform_get_drvdata(pdev);
-
-       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
-               actmon_dev = &tegra->devices[i];
-
-               tegra_actmon_configure_device(tegra, actmon_dev);
-       }
-
-       return 0;
 }
 
 static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
                                u32 flags)
 {
-       struct platform_device *pdev;
-       struct tegra_devfreq *tegra;
+       struct tegra_devfreq *tegra = dev_get_drvdata(dev);
        struct dev_pm_opp *opp;
        unsigned long rate = *freq * KHZ;
-
-       pdev = container_of(dev, struct platform_device, dev);
-       tegra = platform_get_drvdata(pdev);
 
        rcu_read_lock();
        opp = devfreq_recommended_opp(dev, &rate, flags);
···
        rate = dev_pm_opp_get_freq(opp);
        rcu_read_unlock();
 
-       /* TODO: Once we have per-user clk constraints, set a floor */
-       clk_set_rate(tegra->emc_clock, rate);
-
-       /* TODO: Set voltage as well */
+       clk_set_min_rate(tegra->emc_clock, rate);
+       clk_set_rate(tegra->emc_clock, 0);
 
        return 0;
 }
···
 static int tegra_devfreq_get_dev_status(struct device *dev,
                                        struct devfreq_dev_status *stat)
 {
-       struct platform_device *pdev;
-       struct tegra_devfreq *tegra;
+       struct tegra_devfreq *tegra = dev_get_drvdata(dev);
        struct tegra_devfreq_device *actmon_dev;
-
-       pdev = container_of(dev, struct platform_device, dev);
-       tegra = platform_get_drvdata(pdev);
 
        stat->current_frequency = tegra->cur_freq;
···
        actmon_dev = &tegra->devices[MCALL];
 
        /* Number of cycles spent on memory access */
-       stat->busy_time = actmon_dev->avg_count;
+       stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
 
        /* The bus can be considered to be saturated way before 100% */
        stat->busy_time *= 100 / BUS_SATURATION_RATIO;
···
        /* Number of cycles in a sampling period */
        stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
 
+       stat->busy_time = min(stat->busy_time, stat->total_time);
+
        return 0;
 }
 
-static int tegra_devfreq_get_target(struct devfreq *devfreq,
-                                   unsigned long *freq)
+static struct devfreq_dev_profile tegra_devfreq_profile = {
+       .polling_ms = 0,
+       .target = tegra_devfreq_target,
+       .get_dev_status = tegra_devfreq_get_dev_status,
+};
+
+static int tegra_governor_get_target(struct devfreq *devfreq,
+                                    unsigned long *freq)
 {
        struct devfreq_dev_status stat;
        struct tegra_devfreq *tegra;
···
        return 0;
 }
 
-static int tegra_devfreq_event_handler(struct devfreq *devfreq,
-                                      unsigned int event, void *data)
+static int tegra_governor_event_handler(struct devfreq *devfreq,
+                                       unsigned int event, void *data)
 {
-       return 0;
+       struct tegra_devfreq *tegra;
+       int ret = 0;
+
+       tegra = dev_get_drvdata(devfreq->dev.parent);
+
+       switch (event) {
+       case DEVFREQ_GOV_START:
+               devfreq_monitor_start(devfreq);
+               tegra_actmon_enable_interrupts(tegra);
+               break;
+
+       case DEVFREQ_GOV_STOP:
+               tegra_actmon_disable_interrupts(tegra);
+               devfreq_monitor_stop(devfreq);
+               break;
+
+       case DEVFREQ_GOV_SUSPEND:
+               tegra_actmon_disable_interrupts(tegra);
+               devfreq_monitor_suspend(devfreq);
+               break;
+
+       case DEVFREQ_GOV_RESUME:
+               devfreq_monitor_resume(devfreq);
+               tegra_actmon_enable_interrupts(tegra);
+               break;
+       }
+
+       return ret;
 }
 
 static struct devfreq_governor tegra_devfreq_governor = {
-       .name = "tegra",
-       .get_target_freq = tegra_devfreq_get_target,
-       .event_handler = tegra_devfreq_event_handler,
-};
-
-static struct devfreq_dev_profile tegra_devfreq_profile = {
-       .polling_ms = 0,
-       .target = tegra_devfreq_target,
-       .get_dev_status = tegra_devfreq_get_dev_status,
+       .name = "tegra_actmon",
+       .get_target_freq = tegra_governor_get_target,
+       .event_handler = tegra_governor_event_handler,
 };
 
 static int tegra_devfreq_probe(struct platform_device *pdev)
···
        struct tegra_devfreq *tegra;
        struct tegra_devfreq_device *dev;
        struct resource *res;
-       unsigned long max_freq;
        unsigned int i;
+       unsigned long rate;
        int irq;
        int err;
 
···
        if (!tegra)
                return -ENOMEM;
 
-       spin_lock_init(&tegra->lock);
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get regs resource\n");
-               return -ENODEV;
-       }
 
        tegra->regs = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(tegra->regs)) {
-               dev_err(&pdev->dev, "Failed to get IO memory\n");
+       if (IS_ERR(tegra->regs))
                return PTR_ERR(tegra->regs);
-       }
 
        tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
        if (IS_ERR(tegra->reset)) {
···
                return PTR_ERR(tegra->emc_clock);
        }
 
-       err = of_init_opp_table(&pdev->dev);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to init operating point table\n");
-               return err;
-       }
+       clk_set_rate(tegra->emc_clock, ULONG_MAX);
 
        tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
        err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
···
 
        err = clk_prepare_enable(tegra->clock);
        if (err) {
-               reset_control_deassert(tegra->reset);
+               dev_err(&pdev->dev,
+                       "Failed to prepare and enable ACTMON clock\n");
                return err;
        }
 
        reset_control_deassert(tegra->reset);
 
-       max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
-       tegra->max_freq = max_freq / KHZ;
-
-       clk_set_rate(tegra->emc_clock, max_freq);
-
+       tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
        tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
 
-       writel(ACTMON_SAMPLING_PERIOD - 1,
-              tegra->regs + ACTMON_GLB_PERIOD_CTRL);
+       actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+                     ACTMON_GLB_PERIOD_CTRL);
 
        for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
                dev = tegra->devices + i;
                dev->config = actmon_device_configs + i;
                dev->regs = tegra->regs + dev->config->offset;
+               spin_lock_init(&dev->lock);
 
-               tegra_actmon_configure_device(tegra, tegra->devices + i);
+               tegra_actmon_configure_device(tegra, dev);
        }
 
-       err = devfreq_add_governor(&tegra_devfreq_governor);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to add governor\n");
-               return err;
+       for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
+               rate = clk_round_rate(tegra->emc_clock, rate);
+               dev_pm_opp_add(&pdev->dev, rate, 0);
        }
-
-       tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
-       tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
-                                                &tegra_devfreq_profile,
-                                                "tegra",
-                                                NULL);
 
        irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(&pdev->dev, "Failed to get IRQ\n");
+               return -ENODEV;
+       }
+
+       platform_set_drvdata(pdev, tegra);
+
        err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
                                        actmon_thread_isr, IRQF_SHARED,
                                        "tegra-devfreq", tegra);
···
                return err;
        }
 
-       platform_set_drvdata(pdev, tegra);
+       tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
+       tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
+                                                &tegra_devfreq_profile,
+                                                "tegra_actmon",
+                                                NULL);
 
        return 0;
 }
···
 static int tegra_devfreq_remove(struct platform_device *pdev)
 {
        struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
+       int irq = platform_get_irq(pdev, 0);
+       u32 val;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+               val = device_readl(&tegra->devices[i], ACTMON_DEV_CTRL);
+               val &= ~ACTMON_DEV_CTRL_ENB;
+               device_writel(&tegra->devices[i], val, ACTMON_DEV_CTRL);
+       }
+
+       actmon_write_barrier(tegra);
+
+       devm_free_irq(&pdev->dev, irq, tegra);
 
        clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
 
···
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
-                        tegra_devfreq_suspend,
-                        tegra_devfreq_resume);
-
-static struct of_device_id tegra_devfreq_of_match[] = {
+static const struct of_device_id tegra_devfreq_of_match[] = {
        { .compatible = "nvidia,tegra124-actmon" },
        { },
 };
+
+MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
 
 static struct platform_driver tegra_devfreq_driver = {
        .probe = tegra_devfreq_probe,
        .remove = tegra_devfreq_remove,
        .driver = {
-               .name = "tegra-devfreq",
-               .owner = THIS_MODULE,
+               .name = "tegra-devfreq",
                .of_match_table = tegra_devfreq_of_match,
-               .pm = &tegra_devfreq_pm_ops,
        },
 };
-module_platform_driver(tegra_devfreq_driver);
 
-MODULE_LICENSE("GPL");
+static int __init tegra_devfreq_init(void)
+{
+       int ret = 0;
+
+       ret = devfreq_add_governor(&tegra_devfreq_governor);
+       if (ret) {
+               pr_err("%s: failed to add governor: %d\n", __func__, ret);
+               return ret;
+       }
+
+       ret = platform_driver_register(&tegra_devfreq_driver);
+       if (ret)
+               devfreq_remove_governor(&tegra_devfreq_governor);
+
+       return ret;
+}
+module_init(tegra_devfreq_init)
+
+static void __exit tegra_devfreq_exit(void)
+{
+       int ret = 0;
+
+       platform_driver_unregister(&tegra_devfreq_driver);
+
+       ret = devfreq_remove_governor(&tegra_devfreq_governor);
+       if (ret)
+               pr_err("%s: failed to remove governor: %d\n", __func__, ret);
+}
+module_exit(tegra_devfreq_exit)
+
+MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Tegra devfreq driver");
 MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
-MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
+2 -1
drivers/powercap/intel_rapl.c
···
        .driver_data = (kernel_ulong_t)&_ops, \
        }
 
-static const struct x86_cpu_id rapl_ids[] = {
+static const struct x86_cpu_id rapl_ids[] __initconst = {
        RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
        RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
        RAPL_CPU(0x37, rapl_defaults_atom),/* Valleyview */
···
        RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
        RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
        RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
+       RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
        RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
        RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
        RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
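
Note on the __initconst change above: it is only safe because rapl_ids is referenced exclusively from the driver's init path, so the table can be discarded along with the rest of the init sections once boot-time matching is done. Below is a minimal, hypothetical sketch of that general pattern; example_ids and example_init are illustrative names, not code from intel_rapl.c:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <asm/cpu_device_id.h>
    #include <asm/processor.h>

    /* Hypothetical ID table: like rapl_ids, it is only dereferenced at init time. */
    static const struct x86_cpu_id example_ids[] __initconst = {
            { .vendor = X86_VENDOR_INTEL, .family = 6, .model = 0x3f },
            {}
    };

    static int __init example_init(void)
    {
            /* Init calls run before init memory is freed, so the table still exists. */
            if (!x86_match_cpu(example_ids))
                    return -ENODEV;

            return 0;
    }
    module_init(example_init);

    MODULE_LICENSE("GPL");

If such a table were also referenced from a later code path (a sysfs handler, for instance), __initconst would turn into a use-after-free; the RAPL table qualifies because it is only consumed while probing CPUs at init.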
+1 -1
include/linux/devfreq-event.h
···
        const char *name;
        void *driver_data;
 
-       struct devfreq_event_ops *ops;
+       const struct devfreq_event_ops *ops;
 };
 
 #if defined(CONFIG_PM_DEVFREQ_EVENT)
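
For completeness, here is a minimal sketch of how a provider wires up the constified ops after this change. The foo_* names are hypothetical, and the devm_devfreq_event_add_edev() registration shown is an assumption about how such a driver would typically hook into the devfreq-event framework, not an excerpt from any in-tree driver:

    #include <linux/devfreq-event.h>
    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_set_event(struct devfreq_event_dev *edev)
    {
            /* start the hardware counters here */
            return 0;
    }

    static int foo_get_event(struct devfreq_event_dev *edev,
                             struct devfreq_event_data *edata)
    {
            edata->load_count = 0;          /* busy cycles read from hardware */
            edata->total_count = 1;         /* cycles in the sampling period */
            return 0;
    }

    /* The ops table can now live in rodata... */
    static const struct devfreq_event_ops foo_event_ops = {
            .set_event = foo_set_event,
            .get_event = foo_get_event,
    };

    /* ...because devfreq_event_desc.ops is a pointer to const after this merge. */
    static struct devfreq_event_desc foo_event_desc = {
            .name = "foo-counter",
            .ops = &foo_event_ops,
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct devfreq_event_dev *edev;

            edev = devm_devfreq_event_add_edev(&pdev->dev, &foo_event_desc);

            return PTR_ERR_OR_ZERO(edev);
    }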