/* Source: Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git), at tag v2.6.30 */

/*
 * (C) Copyright 2008 Intel Corporation
 * Authors:
 * Andy Henroid <andrew.d.henroid@intel.com>
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */

/*
 * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
 * are idle, using the DIMM thermal throttling capability.
 *
 * This driver depends on the Intel integrated DMA controller (I/O AT).
 * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
 * this driver should work cooperatively.
 */

/* #define DEBUG */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/stop_machine.h>
#include <linux/i7300_idle.h>

#include <asm/idle.h>

#include "../dma/ioatdma_hw.h"
#include "../dma/ioatdma_registers.h"

#define I7300_IDLE_DRIVER_VERSION	"1.55"
#define I7300_PRINT			"i7300_idle:"

#define MAX_STOP_RETRIES	10

static int debug;
module_param_named(debug, debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable debug printks in this driver");

static int forceload;
module_param_named(forceload, forceload, uint, 0644);
MODULE_PARM_DESC(forceload, "Enable driver testing on unvalidated i5000");

#define dprintk(fmt, arg...) \
        do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)

/*
 * Value to set THRTLOW to when initiating throttling
 *  0 = No throttling
 *  1 = Throttle when > 4 activations per eval window (Maximum throttling)
 *  2 = Throttle when > 8 activations
 *  168 = Throttle when > 672 activations (Minimum throttling)
 */
#define MAX_THROTTLE_LOW_LIMIT		168
static uint throttle_low_limit = 1;
module_param_named(throttle_low_limit, throttle_low_limit, uint, 0644);
MODULE_PARM_DESC(throttle_low_limit,
                "Value for THRTLOWLM activation field "
                "(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");
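
/*
 * Note on usage: the limit value N above corresponds to throttling when more
 * than 4*N activations occur per evaluation window. The parameters can be set
 * at load time, e.g. "modprobe i7300_idle throttle_low_limit=2 debug=1"
 * (assuming the module is built as i7300_idle.ko), and, since they are
 * registered with mode 0644, changed later via
 * /sys/module/i7300_idle/parameters/.
 */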

/*
 * simple invocation and duration statistics
 */
static unsigned long total_starts;
static unsigned long total_us;

#ifdef DEBUG
static unsigned long past_skip;
#endif

static struct pci_dev *fbd_dev;

static spinlock_t i7300_idle_lock;
static int i7300_idle_active;

static u8 i7300_idle_thrtctl_saved;
static u8 i7300_idle_thrtlow_saved;
static u32 i7300_idle_mc_saved;

static cpumask_t idle_cpumask;
static ktime_t start_ktime;
static unsigned long avg_idle_us;

static struct dentry *debugfs_dir;

/* Begin: I/O AT Helper routines */

#define IOAT_CHANBASE(ioat_ctl, chan) (ioat_ctl + 0x80 + 0x80 * chan)
/* Snoop control (disable snoops when coherency is not important) */
#define IOAT_DESC_SADDR_SNP_CTL (1UL << 1)
#define IOAT_DESC_DADDR_SNP_CTL (1UL << 2)

static struct pci_dev *ioat_dev;
static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
static unsigned long ioat_desc_phys;
static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
static u8 *ioat_chanbase;

/* Start I/O AT memory copy */
static int i7300_idle_ioat_start(void)
{
        u32 err;
        /* Clear error (due to circular descriptor pointer) */
        err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
        if (err)
                writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);

        writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
        return 0;
}

/* Stop I/O AT memory copy */
static void i7300_idle_ioat_stop(void)
{
        int i;
        u64 sts;

        for (i = 0; i < MAX_STOP_RETRIES; i++) {
                writeb(IOAT_CHANCMD_RESET,
                        ioat_chanbase + IOAT1_CHANCMD_OFFSET);

                udelay(10);

                sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
                        IOAT_CHANSTS_DMA_TRANSFER_STATUS;

                if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
                        break;

        }

        if (i == MAX_STOP_RETRIES) {
                dprintk("failed to stop I/O AT after %d retries\n",
                        MAX_STOP_RETRIES);
        }
}

/* Test I/O AT by copying 1024 bytes from 2k to 1k */
static int __init i7300_idle_ioat_selftest(u8 *ctl,
                struct ioat_dma_descriptor *desc, unsigned long desc_phys)
{
        u64 chan_sts;

        memset(desc, 0, 2048);
        memset((u8 *) desc + 2048, 0xab, 1024);
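
        /*
         * Layout of the 4 KiB test page at this point: bytes 0-2047 are
         * zeroed (the descriptor itself, plus the copy destination starting
         * at offset 1024), and bytes 2048-3071 hold the 0xab source pattern.
         */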
        desc[0].size = 1024;
        desc[0].ctl = 0;
        desc[0].src_addr = desc_phys + 2048;
        desc[0].dst_addr = desc_phys + 1024;
        desc[0].next = 0;

        writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
        writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);

        udelay(1000);

        chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
                        IOAT_CHANSTS_DMA_TRANSFER_STATUS;

        if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
                /* Not complete, reset the channel */
                writeb(IOAT_CHANCMD_RESET,
                        ioat_chanbase + IOAT1_CHANCMD_OFFSET);
                return -1;
        }

        if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
            *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
                dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
                        *(u32 *) ((u8 *) desc + 2048),
                        *(u32 *) ((u8 *) desc + 1024),
                        *(u32 *) ((u8 *) desc + 3072));
                return -1;
        }
        return 0;
}

static struct device dummy_dma_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = DMA_BIT_MASK(64),
        .dma_mask = &dummy_dma_dev.coherent_dma_mask,
};

/* Setup and initialize I/O AT */
/* This driver needs I/O AT as the throttling takes effect only when there is
 * some memory activity. We use I/O AT to set up a dummy copy, while all CPUs
 * go idle and memory is throttled.
 */
static int __init i7300_idle_ioat_init(void)
{
        u8 ver, chan_count, ioat_chan;
        u16 chan_ctl;

        ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
                                            pci_resource_len(ioat_dev, 0));

        if (!ioat_iomap) {
                printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
                goto err_ret;
        }

        ver = readb(ioat_iomap + IOAT_VER_OFFSET);
        if (ver != IOAT_VER_1_2) {
                printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
                        ver >> 4, ver & 0xf);
                goto err_unmap;
        }

        chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
        if (!chan_count) {
                printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
                        "(%u)\n",
                        chan_count);
                goto err_unmap;
        }

        ioat_chan = chan_count - 1;
        ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);

        chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
        if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
                printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
                goto err_unmap;
        }

        writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
                ioat_chanbase + IOAT_CHANCTRL_OFFSET);

        ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
                        &dummy_dma_dev, 4096,
                        (dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
        if (!ioat_desc) {
                printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
                goto err_mark_unused;
        }

        writel(ioat_desc_phys & 0xffffffffUL,
               ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
        writel(ioat_desc_phys >> 32,
               ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);

        if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
                printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
                goto err_free;
        }

        /* Setup circular I/O AT descriptor chain */
        ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
        ioat_desc[0].src_addr = ioat_desc_phys + 2048;
        ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
        ioat_desc[0].size = 128;
        ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);

        ioat_desc[1].ctl = ioat_desc[0].ctl;
        ioat_desc[1].src_addr = ioat_desc[0].src_addr;
        ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
        ioat_desc[1].size = ioat_desc[0].size;
        ioat_desc[1].next = ioat_desc_phys;
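
        /*
         * The two descriptors point at each other, so once started the
         * engine keeps repeating the 128-byte copy (the dummy memory
         * traffic the throttling needs) until the channel is reset by
         * i7300_idle_ioat_stop().
         */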

        return 0;

err_free:
        dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
err_mark_unused:
        writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
err_unmap:
        iounmap(ioat_iomap);
err_ret:
        return -ENODEV;
}

/* Cleanup I/O AT */
static void __exit i7300_idle_ioat_exit(void)
{
        int i;
        u64 chan_sts;

        i7300_idle_ioat_stop();

        /* Wait for a while for the channel to halt before releasing */
        for (i = 0; i < MAX_STOP_RETRIES; i++) {
                writeb(IOAT_CHANCMD_RESET,
                        ioat_chanbase + IOAT1_CHANCMD_OFFSET);

                chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
                        IOAT_CHANSTS_DMA_TRANSFER_STATUS;

                if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
                        writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
                        break;
                }
                udelay(1000);
        }

        chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
                        IOAT_CHANSTS_DMA_TRANSFER_STATUS;

        /*
         * We tried to reset multiple times. If the I/O AT channel is still
         * active, flag an error and return without cleanup. A memory leak is
         * better than random corruption in that extreme error situation.
         */
        if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
                printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
                        " Not freeing resources\n");
                return;
        }

        dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
        iounmap(ioat_iomap);
}

/* End: I/O AT Helper routines */

#define DIMM_THRTLOW 0x64
#define DIMM_THRTCTL 0x67
#define DIMM_THRTCTL_THRMHUNT (1UL << 0)
#define DIMM_MC 0x40
#define DIMM_GTW_MODE (1UL << 17)
#define DIMM_GBLACT 0x60

/*
 * Keep track of an exponential-decaying average of recent idle durations.
 * The latest duration gets DURATION_WEIGHT_PCT percentage weight
 * in this average, with the old average getting the remaining weight.
 *
 * High weights emphasize recent history, low weights include long history.
 */
#define DURATION_WEIGHT_PCT 55
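
/*
 * For example, with the default weight of 55: if the running average is
 * 200 us and the latest fully-idle period lasted 50 us, the new average
 * becomes (45 * 200 + 55 * 50) / 100 = 117 us.
 */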

/*
 * When the decaying average of recent durations or the predicted duration
 * of the next timer interrupt is shorter than duration_threshold, the
 * driver will decline to throttle.
 */
#define DURATION_THRESHOLD_US 100


/* Store DIMM thermal throttle configuration */
static int i7300_idle_thrt_save(void)
{
        u32 new_mc_val;
        u8 gblactlm;

        pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
        pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
        pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);
        /*
         * Make sure we have Global Throttling Window Mode set to have a
         * "short" window. This (mostly) works around an issue where
         * throttling persists until the end of the global throttling window
         * size. On the tested system, this was resulting in a maximum of
         * 64 ms to exit throttling (average 32 ms). The actual numbers
         * depend on system frequencies. Setting the short window reduces
         * this by a factor of 4096.
         *
         * We will only do this if the system is set for
         * unlimited-activations while in open-loop throttling (i.e., when
         * Global Activation Throttle Limit is zero).
         */
        pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
        dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
                i7300_idle_thrtctl_saved,
                i7300_idle_thrtlow_saved);
        dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
                i7300_idle_mc_saved,
                gblactlm);
        if (gblactlm == 0) {
                new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
                pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
                return 0;
        } else {
                dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
                return -ENODEV;
        }
}

/* Restore DIMM thermal throttle configuration */
static void i7300_idle_thrt_restore(void)
{
        pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
        pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
        pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
}

/* Enable DIMM thermal throttling */
static void i7300_idle_start(void)
{
        u8 new_ctl;
        u8 limit;

        new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
        pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);

        limit = throttle_low_limit;
        if (unlikely(limit > MAX_THROTTLE_LOW_LIMIT))
                limit = MAX_THROTTLE_LOW_LIMIT;

        pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);

        new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
        pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
}

/* Disable DIMM thermal throttling */
static void i7300_idle_stop(void)
{
        u8 new_ctl;
        u8 got_ctl;

        new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
        pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);

        pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
        pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
        pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
        WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
}


/*
 * i7300_avg_duration_check()
 * return 0 if the decaying average of recent idle durations is
 * more than DURATION_THRESHOLD_US
 */
static int i7300_avg_duration_check(void)
{
        if (avg_idle_us >= DURATION_THRESHOLD_US)
                return 0;

#ifdef DEBUG
        past_skip++;
#endif
        return 1;
}

/* Idle notifier to look at idle CPUs */
static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
                                void *data)
{
        unsigned long flags;
        ktime_t now_ktime;
        static ktime_t idle_begin_time;
        static int time_init = 1;

        if (!throttle_low_limit)
                return 0;

        if (unlikely(time_init)) {
                time_init = 0;
                idle_begin_time = ktime_get();
        }

        spin_lock_irqsave(&i7300_idle_lock, flags);
        if (val == IDLE_START) {

                cpu_set(smp_processor_id(), idle_cpumask);

                if (cpus_weight(idle_cpumask) != num_online_cpus())
                        goto end;

                now_ktime = ktime_get();
                idle_begin_time = now_ktime;

                if (i7300_avg_duration_check())
                        goto end;

                i7300_idle_active = 1;
                total_starts++;
                start_ktime = now_ktime;

                i7300_idle_start();
                i7300_idle_ioat_start();

        } else if (val == IDLE_END) {
                cpu_clear(smp_processor_id(), idle_cpumask);
                if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
                        /* First CPU coming out of idle */
                        u64 idle_duration_us;

                        now_ktime = ktime_get();

                        idle_duration_us = ktime_to_us(ktime_sub
                                                (now_ktime, idle_begin_time));

                        avg_idle_us =
                                ((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
                                 DURATION_WEIGHT_PCT * idle_duration_us) / 100;

                        if (i7300_idle_active) {
                                ktime_t idle_ktime;

                                idle_ktime = ktime_sub(now_ktime, start_ktime);
                                total_us += ktime_to_us(idle_ktime);

                                i7300_idle_ioat_stop();
                                i7300_idle_stop();
                                i7300_idle_active = 0;
                        }
                }
        }
end:
        spin_unlock_irqrestore(&i7300_idle_lock, flags);
        return 0;
}

static struct notifier_block i7300_idle_nb = {
        .notifier_call = i7300_idle_notifier,
};

MODULE_DEVICE_TABLE(pci, pci_tbl);

int stats_open_generic(struct inode *inode, struct file *fp)
{
        fp->private_data = inode->i_private;
        return 0;
}

static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
                                loff_t *off)
{
        unsigned long *p = fp->private_data;
        char buf[32];
        int len;

        len = snprintf(buf, 32, "%lu\n", *p);
        return simple_read_from_buffer(ubuf, count, off, buf, len);
}

static const struct file_operations idle_fops = {
        .open = stats_open_generic,
        .read = stats_read_ul,
};

struct debugfs_file_info {
        void *ptr;
        char name[32];
        struct dentry *file;
} debugfs_file_list[] = {
                                {&total_starts, "total_starts", NULL},
                                {&total_us, "total_us", NULL},
#ifdef DEBUG
                                {&past_skip, "past_skip", NULL},
#endif
                                {NULL, "", NULL}
                        };
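
/*
 * With debugfs mounted at its usual location, the counters above show up as
 * /sys/kernel/debug/i7300_idle/total_starts and
 * /sys/kernel/debug/i7300_idle/total_us, and can simply be read with cat.
 */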

static int __init i7300_idle_init(void)
{
        spin_lock_init(&i7300_idle_lock);
        cpus_clear(idle_cpumask);
        total_us = 0;

        if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
                return -ENODEV;

        if (i7300_idle_thrt_save())
                return -ENODEV;

        if (i7300_idle_ioat_init())
                return -ENODEV;

        debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
        if (debugfs_dir) {
                int i = 0;

                while (debugfs_file_list[i].ptr != NULL) {
                        debugfs_file_list[i].file = debugfs_create_file(
                                        debugfs_file_list[i].name,
                                        S_IRUSR,
                                        debugfs_dir,
                                        debugfs_file_list[i].ptr,
                                        &idle_fops);
                        i++;
                }
        }

        idle_notifier_register(&i7300_idle_nb);

        printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
        return 0;
}

static void __exit i7300_idle_exit(void)
{
        idle_notifier_unregister(&i7300_idle_nb);

        if (debugfs_dir) {
                int i = 0;

                while (debugfs_file_list[i].file != NULL) {
                        debugfs_remove(debugfs_file_list[i].file);
                        i++;
                }

                debugfs_remove(debugfs_dir);
        }
        i7300_idle_thrt_restore();
        i7300_idle_ioat_exit();
}

module_init(i7300_idle_init);
module_exit(i7300_idle_exit);

MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
                        I7300_IDLE_DRIVER_VERSION);
MODULE_LICENSE("GPL");