x86, mrst: Fix whitespace breakage in apb_timer.c

Checkin bb24c4716185f6e116c440462c65c1f56649183b:
"Moorestown APB system timer driver" suffered from severe whitespace
damage in arch/x86/kernel/apb_timer.c due to using Microsoft Lookout
to send a patch. Fix the whitespace breakage.
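
For the record, a change like this can be confirmed to be whitespace-only
by diffing with whitespace ignored; a minimal check, assuming a kernel
tree with this commit at HEAD (illustrative, not part of the patch):

    # git's -w (--ignore-all-space) option suppresses whitespace-only
    # hunks, so an empty diff body confirms only whitespace changed:
    git show -w HEAD -- arch/x86/kernel/apb_timer.c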

Reported-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

+487 -487
arch/x86/kernel/apb_timer.c
···
#include <asm/fixmap.h>
#include <asm/apb_timer.h>

#define APBT_MASK			CLOCKSOURCE_MASK(32)
#define APBT_SHIFT			22
#define APBT_CLOCKEVENT_RATING		150
#define APBT_CLOCKSOURCE_RATING		250
#define APBT_MIN_DELTA_USEC		200

#define EVT_TO_APBT_DEV(evt) container_of(evt, struct apbt_dev, evt)
#define APBT_CLOCKEVENT0_NUM	(0)
···
static uint64_t apbt_freq;

static void apbt_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt);
static int apbt_next_event(unsigned long delta,
			   struct clock_event_device *evt);
static cycle_t apbt_read_clocksource(struct clocksource *cs);
static void apbt_restart_clocksource(void);

struct apbt_dev {
	struct clock_event_device evt;
	unsigned int num;
	int cpu;
	unsigned int irq;
	unsigned int tick;
	unsigned int count;
	unsigned int flags;
	char name[10];
};

int disable_apbt_percpu __cpuinitdata;
···
static struct apbt_dev *apbt_devs;
#endif

static inline unsigned long apbt_readl_reg(unsigned long a)
{
	return readl(apbt_virt_address + a);
}

static inline void apbt_writel_reg(unsigned long d, unsigned long a)
{
	writel(d, apbt_virt_address + a);
}

static inline unsigned long apbt_readl(int n, unsigned long a)
{
	return readl(apbt_virt_address + a + n * APBTMRS_REG_SIZE);
}

static inline void apbt_writel(int n, unsigned long d, unsigned long a)
{
	writel(d, apbt_virt_address + a + n * APBTMRS_REG_SIZE);
}

static inline void apbt_set_mapping(void)
{
	struct sfi_timer_table_entry *mtmr;

	if (apbt_virt_address) {
		pr_debug("APBT base already mapped\n");
		return;
	}
	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return;
	}
	apbt_address = (unsigned long)mtmr->phys_addr;
	if (!apbt_address) {
		printk(KERN_WARNING "No timer base from SFI, use default\n");
		apbt_address = APBT_DEFAULT_BASE;
	}
	apbt_virt_address = ioremap_nocache(apbt_address, APBT_MMAP_SIZE);
	if (apbt_virt_address) {
		pr_debug("Mapped APBT physical addr %p at virtual addr %p\n",\
			 (void *)apbt_address, (void *)apbt_virt_address);
	} else {
		pr_debug("Failed mapping APBT phy address at %p\n",\
			 (void *)apbt_address);
		goto panic_noapbt;
	}
	apbt_freq = mtmr->freq_hz / USEC_PER_SEC;
	sfi_free_mtmr(mtmr);

	/* Now figure out the physical timer id for clocksource device */
	mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
	if (mtmr == NULL)
		goto panic_noapbt;

	/* Now figure out the physical timer id */
	phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff)
		/ APBTMRS_REG_SIZE;
	pr_debug("Use timer %d for clocksource\n", phy_cs_timer_id);
	return;

panic_noapbt:
	panic("Failed to setup APB system timer\n");

}

static inline void apbt_clear_mapping(void)
{
	iounmap(apbt_virt_address);
	apbt_virt_address = NULL;
}

/*
···
 */
static inline int is_apbt_capable(void)
{
	return apbt_virt_address ? 1 : 0;
}

static struct clocksource clocksource_apbt = {
	.name		= "apbt",
	.rating		= APBT_CLOCKSOURCE_RATING,
	.read		= apbt_read_clocksource,
	.mask		= APBT_MASK,
	.shift		= APBT_SHIFT,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= apbt_restart_clocksource,
};

/* boot APB clock event device */
static struct clock_event_device apbt_clockevent = {
	.name		= "apbt0",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= apbt_set_mode,
	.set_next_event = apbt_next_event,
	.shift		= APBT_SHIFT,
	.irq		= 0,
	.rating		= APBT_CLOCKEVENT_RATING,
};

/*
···
 */
static inline int __init setup_x86_mrst_timer(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (strcmp("apbt_only", arg) == 0)
		disable_apbt_percpu = 0;
	else if (strcmp("lapic_and_apbt", arg) == 0)
		disable_apbt_percpu = 1;
	else {
		pr_warning("X86 MRST timer option %s not recognised"
			   " use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
			   arg);
		return -EINVAL;
	}
	return 0;
}
__setup("x86_mrst_timer=", setup_x86_mrst_timer);

···
 */
static void apbt_start_counter(int n)
{
	unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);

	ctrl &= ~APBTMR_CONTROL_ENABLE;
	apbt_writel(n, ctrl, APBTMR_N_CONTROL);
	apbt_writel(n, ~0, APBTMR_N_LOAD_COUNT);
	/* enable, mask interrupt */
	ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
	ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
	apbt_writel(n, ctrl, APBTMR_N_CONTROL);
	/* read it once to get cached counter value initialized */
	apbt_read_clocksource(&clocksource_apbt);
}

static irqreturn_t apbt_interrupt_handler(int irq, void *data)
{
	struct apbt_dev *dev = (struct apbt_dev *)data;
	struct clock_event_device *aevt = &dev->evt;

	if (!aevt->event_handler) {
		printk(KERN_INFO "Spurious APBT timer interrupt on %d\n",
		       dev->num);
		return IRQ_NONE;
	}
	aevt->event_handler(aevt);
	return IRQ_HANDLED;
}

static void apbt_restart_clocksource(void)
{
	apbt_start_counter(phy_cs_timer_id);
}

/* Setup IRQ routing via IOAPIC */
#ifdef CONFIG_SMP
static void apbt_setup_irq(struct apbt_dev *adev)
{
	struct irq_chip *chip;
	struct irq_desc *desc;

	/* timer0 irq has been setup early */
	if (adev->irq == 0)
		return;
	desc = irq_to_desc(adev->irq);
	chip = get_irq_chip(adev->irq);
	disable_irq(adev->irq);
	desc->status |= IRQ_MOVE_PCNTXT;
	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
	/* APB timer irqs are set up as mp_irqs, timer is edge triggerred */
	set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
	enable_irq(adev->irq);
	if (system_state == SYSTEM_BOOTING)
		if (request_irq(adev->irq, apbt_interrupt_handler,
				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
				adev->name, adev)) {
			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
			       adev->num);
		}
}
#endif

static void apbt_enable_int(int n)
{
	unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
	/* clear pending intr */
	apbt_readl(n, APBTMR_N_EOI);
	ctrl &= ~APBTMR_CONTROL_INT;
	apbt_writel(n, ctrl, APBTMR_N_CONTROL);
}

static void apbt_disable_int(int n)
{
	unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);

	ctrl |= APBTMR_CONTROL_INT;
	apbt_writel(n, ctrl, APBTMR_N_CONTROL);
}


static int __init apbt_clockevent_register(void)
{
	struct sfi_timer_table_entry *mtmr;
	struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);

	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return -ENODEV;
	}

	/*
	 * We need to calculate the scaled math multiplication factor for
	 * nanosecond to apbt tick conversion.
	 * mult = (nsec/cycle)*2^APBT_SHIFT
	 */
	apbt_clockevent.mult = div_sc((unsigned long) mtmr->freq_hz
				      , NSEC_PER_SEC, APBT_SHIFT);

	/* Calculate the min / max delta */
	apbt_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
							   &apbt_clockevent);
	apbt_clockevent.min_delta_ns = clockevent_delta2ns(
		APBT_MIN_DELTA_USEC*apbt_freq,
		&apbt_clockevent);
	/*
	 * Start apbt with the boot cpu mask and make it
	 * global if not used for per cpu timer.
	 */
	apbt_clockevent.cpumask = cpumask_of(smp_processor_id());
	adev->num = smp_processor_id();
	memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));

	if (disable_apbt_percpu) {
		apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
		global_clock_event = &adev->evt;
		printk(KERN_DEBUG "%s clockevent registered as global\n",
		       global_clock_event->name);
	}

	if (request_irq(apbt_clockevent.irq, apbt_interrupt_handler,
			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
			apbt_clockevent.name, adev)) {
		printk(KERN_ERR "Failed request IRQ for APBT%d\n",
		       apbt_clockevent.irq);
	}

	clockevents_register_device(&adev->evt);
	/* Start APBT 0 interrupts */
	apbt_enable_int(APBT_CLOCKEVENT0_NUM);

	sfi_free_mtmr(mtmr);
	return 0;
}

#ifdef CONFIG_SMP
/* Should be called with per cpu */
void apbt_setup_secondary_clock(void)
{
	struct apbt_dev *adev;
	struct clock_event_device *aevt;
	int cpu;

	/* Don't register boot CPU clockevent */
	cpu = smp_processor_id();
	if (cpu == boot_cpu_id)
		return;
	/*
	 * We need to calculate the scaled math multiplication factor for
	 * nanosecond to apbt tick conversion.
	 * mult = (nsec/cycle)*2^APBT_SHIFT
	 */
	printk(KERN_INFO "Init per CPU clockevent %d\n", cpu);
	adev = &per_cpu(cpu_apbt_dev, cpu);
	aevt = &adev->evt;

	memcpy(aevt, &apbt_clockevent, sizeof(*aevt));
	aevt->cpumask = cpumask_of(cpu);
	aevt->name = adev->name;
	aevt->mode = CLOCK_EVT_MODE_UNUSED;

	printk(KERN_INFO "Registering CPU %d clockevent device %s, mask %08x\n",
	       cpu, aevt->name, *(u32 *)aevt->cpumask);

	apbt_setup_irq(adev);

	clockevents_register_device(aevt);

	apbt_enable_int(cpu);

	return;
}

/*
···
 * the extra interrupt is harmless.
 */
static int apbt_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);

	switch (action & 0xf) {
	case CPU_DEAD:
		apbt_disable_int(cpu);
		if (system_state == SYSTEM_RUNNING)
			pr_debug("skipping APBT CPU %lu offline\n", cpu);
		else if (adev) {
			pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
			free_irq(adev->irq, adev);
		}
		break;
	default:
		pr_debug(KERN_INFO "APBT notified %lu, no action\n", action);
	}
	return NOTIFY_OK;
}

static __init int apbt_late_init(void)
{
	if (disable_apbt_percpu)
		return 0;
	/* This notifier should be called after workqueue is ready */
	hotcpu_notifier(apbt_cpuhp_notify, -20);
	return 0;
}
fs_initcall(apbt_late_init);
#else
···
#endif /* CONFIG_SMP */

static void apbt_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt)
{
	unsigned long ctrl;
	uint64_t delta;
	int timer_num;
	struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);

	timer_num = adev->num;
	pr_debug("%s CPU %d timer %d mode=%d\n",
		 __func__, first_cpu(*evt->cpumask), timer_num, mode);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * apbt_clockevent.mult;
		delta >>= apbt_clockevent.shift;
		ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
		ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		/*
		 * DW APB p. 46, have to disable timer before load counter,
		 * may cause sync problem.
		 */
		ctrl &= ~APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		udelay(1);
		pr_debug("Setting clock period %d for HZ %d\n", (int)delta, HZ);
		apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
		ctrl |= APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		break;
	/* APB timer does not have one-shot mode, use free running mode */
	case CLOCK_EVT_MODE_ONESHOT:
		ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
		/*
		 * set free running mode, this mode will let timer reload max
		 * timeout which will give time (3min on 25MHz clock) to rearm
		 * the next event, therefore emulate the one-shot mode.
		 */
		ctrl &= ~APBTMR_CONTROL_ENABLE;
		ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;

		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		/* write again to set free running mode */
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);

		/*
		 * DW APB p. 46, load counter with all 1s before starting free
		 * running mode.
		 */
		apbt_writel(timer_num, ~0, APBTMR_N_LOAD_COUNT);
		ctrl &= ~APBTMR_CONTROL_INT;
		ctrl |= APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		apbt_disable_int(timer_num);
		ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
		ctrl &= ~APBTMR_CONTROL_ENABLE;
		apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
		break;

	case CLOCK_EVT_MODE_RESUME:
		apbt_enable_int(timer_num);
		break;
	}
}

static int apbt_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned long ctrl;
	int timer_num;

	struct apbt_dev *adev = EVT_TO_APBT_DEV(evt);

	timer_num = adev->num;
	/* Disable timer */
	ctrl = apbt_readl(timer_num, APBTMR_N_CONTROL);
	ctrl &= ~APBTMR_CONTROL_ENABLE;
	apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
	/* write new count */
	apbt_writel(timer_num, delta, APBTMR_N_LOAD_COUNT);
	ctrl |= APBTMR_CONTROL_ENABLE;
	apbt_writel(timer_num, ctrl, APBTMR_N_CONTROL);
	return 0;
}

/*
···
 */
static cycle_t apbt_read_clocksource(struct clocksource *cs)
{
	unsigned long t0, t1, t2;
	static unsigned long last_read;

bad_count:
	t1 = apbt_readl(phy_cs_timer_id,
			APBTMR_N_CURRENT_VALUE);
	t2 = apbt_readl(phy_cs_timer_id,
			APBTMR_N_CURRENT_VALUE);
	if (unlikely(t1 < t2)) {
		pr_debug("APBT: read current count error %lx:%lx:%lx\n",
			 t1, t2, t2 - t1);
		goto bad_count;
	}
	/*
	 * check against cached last read, makes sure time does not go back.
	 * it could be a normal rollover but we will do tripple check anyway
	 */
	if (unlikely(t2 > last_read)) {
		/* check if we have a normal rollover */
		unsigned long raw_intr_status =
			apbt_readl_reg(APBTMRS_RAW_INT_STATUS);
		/*
		 * cs timer interrupt is masked but raw intr bit is set if
		 * rollover occurs. then we read EOI reg to clear it.
		 */
		if (raw_intr_status & (1 << phy_cs_timer_id)) {
			apbt_readl(phy_cs_timer_id, APBTMR_N_EOI);
			goto out;
		}
		pr_debug("APB CS going back %lx:%lx:%lx ",
			 t2, last_read, t2 - last_read);
bad_count_x3:
		pr_debug(KERN_INFO "tripple check enforced\n");
		t0 = apbt_readl(phy_cs_timer_id,
				APBTMR_N_CURRENT_VALUE);
		udelay(1);
		t1 = apbt_readl(phy_cs_timer_id,
				APBTMR_N_CURRENT_VALUE);
		udelay(1);
		t2 = apbt_readl(phy_cs_timer_id,
				APBTMR_N_CURRENT_VALUE);
		if ((t2 > t1) || (t1 > t0)) {
			printk(KERN_ERR "Error: APB CS tripple check failed\n");
			goto bad_count_x3;
		}
	}
out:
	last_read = t2;
	return (cycle_t)~t2;
}

static int apbt_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter, use timer 2 as source, timer 0/1 for event */
	apbt_start_counter(phy_cs_timer_id);

	/* Verify whether apbt counter works */
	t1 = apbt_read_clocksource(&clocksource_apbt);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	/* APBT is the only always on clocksource, it has to work! */
	if (t1 == apbt_read_clocksource(&clocksource_apbt))
		panic("APBT counter not counting. APBT disabled\n");

	/*
	 * initialize and register APBT clocksource
	 * convert that to ns/clock cycle
	 * mult = (ns/c) * 2^APBT_SHIFT
	 */
	clocksource_apbt.mult = div_sc(MSEC_PER_SEC,
				       (unsigned long) apbt_freq, APBT_SHIFT);
	clocksource_register(&clocksource_apbt);

	return 0;
}

/*
···
void __init apbt_time_init(void)
{
#ifdef CONFIG_SMP
	int i;
	struct sfi_timer_table_entry *p_mtmr;
	unsigned int percpu_timer;
	struct apbt_dev *adev;
#endif

	if (apb_timer_block_enabled)
		return;
	apbt_set_mapping();
	if (apbt_virt_address) {
		pr_debug("Found APBT version 0x%lx\n",\
			 apbt_readl_reg(APBTMRS_COMP_VERSION));
	} else
		goto out_noapbt;
	/*
	 * Read the frequency and check for a sane value, for ESL model
	 * we extend the possible clock range to allow time scaling.
	 */

	if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
		pr_debug("APBT has invalid freq 0x%llx\n", apbt_freq);
		goto out_noapbt;
	}
	if (apbt_clocksource_register()) {
		pr_debug("APBT has failed to register clocksource\n");
		goto out_noapbt;
	}
	if (!apbt_clockevent_register())
		apb_timer_block_enabled = 1;
	else {
		pr_debug("APBT has failed to register clockevent\n");
		goto out_noapbt;
	}
#ifdef CONFIG_SMP
	/* kernel cmdline disable apb timer, so we will use lapic timers */
	if (disable_apbt_percpu) {
		printk(KERN_INFO "apbt: disabled per cpu timer\n");
		return;
	}
	pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
	if (num_possible_cpus() <= sfi_mtimer_num) {
		percpu_timer = 1;
		apbt_num_timers_used = num_possible_cpus();
	} else {
		percpu_timer = 0;
		apbt_num_timers_used = 1;
		adev = &per_cpu(cpu_apbt_dev, 0);
		adev->flags &= ~APBT_DEV_USED;
	}
	pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);

	/* here we set up per CPU timer data structure */
	apbt_devs = kzalloc(sizeof(struct apbt_dev) * apbt_num_timers_used,
			    GFP_KERNEL);
	if (!apbt_devs) {
		printk(KERN_ERR "Failed to allocate APB timer devices\n");
		return;
	}
	for (i = 0; i < apbt_num_timers_used; i++) {
		adev = &per_cpu(cpu_apbt_dev, i);
		adev->num = i;
		adev->cpu = i;
		p_mtmr = sfi_get_mtmr(i);
		if (p_mtmr) {
			adev->tick = p_mtmr->freq_hz;
			adev->irq = p_mtmr->irq;
		} else
			printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
		adev->count = 0;
		sprintf(adev->name, "apbt%d", i);
	}
#endif

	return;

out_noapbt:
	apbt_clear_mapping();
	apb_timer_block_enabled = 0;
	panic("failed to enable APB timer\n");
}

static inline void apbt_disable(int n)
{
	if (is_apbt_capable()) {
		unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
		ctrl &= ~APBTMR_CONTROL_ENABLE;
		apbt_writel(n, ctrl, APBTMR_N_CONTROL);
	}
}

/* called before apb_timer_enable, use early map */
unsigned long apbt_quick_calibrate()
{
	int i, scale;
	u64 old, new;
	cycle_t t1, t2;
	unsigned long khz = 0;
	u32 loop, shift;

	apbt_set_mapping();
	apbt_start_counter(phy_cs_timer_id);

	/* check if the timer can count down, otherwise return */
	old = apbt_read_clocksource(&clocksource_apbt);
	i = 10000;
	while (--i) {
		if (old != apbt_read_clocksource(&clocksource_apbt))
			break;
	}
	if (!i)
		goto failed;

	/* count 16 ms */
	loop = (apbt_freq * 1000) << 4;

	/* restart the timer to ensure it won't get to 0 in the calibration */
	apbt_start_counter(phy_cs_timer_id);

	old = apbt_read_clocksource(&clocksource_apbt);
	old += loop;

	t1 = __native_read_tsc();

	do {
		new = apbt_read_clocksource(&clocksource_apbt);
	} while (new < old);

	t2 = __native_read_tsc();

	shift = 5;
	if (unlikely(loop >> shift == 0)) {
		printk(KERN_INFO
		       "APBT TSC calibration failed, not enough resolution\n");
		return 0;
	}
	scale = (int)div_u64((t2 - t1), loop >> shift);
	khz = (scale * apbt_freq * 1000) >> shift;
	printk(KERN_INFO "TSC freq calculated by APB timer is %lu khz\n", khz);
	return khz;
failed:
	return 0;
}