MIPS: Octeon: Use lockless interrupt controller operations when possible.

Some newer Octeon chips have registers that allow lockless operation of
the interrupt controller. Take advantage of them.
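The CIU_INTX_EN*_W1S and CIU_INTX_EN*_W1C registers are set/clear aliases of the
per-core enable masks: writing a 1 to a bit sets (or clears) only that bit, so an
enable or disable becomes a single store instead of a read-modify-write guarded by
the CIU rwlocks. A minimal sketch of the contrast, not part of the patch (the
helper names enable_bit_locked/enable_bit_lockless are made up for illustration;
the registers, accessors, and lock are the ones used in the diff below):

        /* Old style: read-modify-write of the whole EN0 mask under a lock. */
        static void enable_bit_locked(int coreid, int bit)
        {
                unsigned long flags;
                uint64_t en0;

                write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 |= 1ull << bit;
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
                write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
        }

        /* New style: one store to the W1S alias sets just this bit, so no
         * lock and no read-back of the mask are needed. */
        static void enable_bit_lockless(int coreid, int bit)
        {
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(coreid * 2), 1ull << bit);
        }

This is why the new "_v2" irq_chips below can drop the CIU rwlocks entirely and why
their .ack/.eoi hooks reduce to a single CSR write on the current core.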

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Authored by David Daney and committed by Ralf Baechle · cd847b78 b6b74d54

+178 -36
arch/mips/cavium-octeon/octeon-irq.c
···
 DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
 DEFINE_SPINLOCK(octeon_irq_msi_lock);
 
+static int octeon_coreid_for_cpu(int cpu)
+{
+#ifdef CONFIG_SMP
+        return cpu_logical_map(cpu);
+#else
+        return cvmx_get_core_num();
+#endif
+}
+
 static void octeon_irq_core_ack(unsigned int irq)
 {
         unsigned int bit = irq - OCTEON_IRQ_SW0;
···
         int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
         unsigned long flags;
         uint64_t en0;
-#ifdef CONFIG_SMP
         int cpu;
         write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
         for_each_online_cpu(cpu) {
-                int coreid = cpu_logical_map(cpu);
+                int coreid = octeon_coreid_for_cpu(cpu);
                 en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                 en0 &= ~(1ull << bit);
                 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
···
          */
         cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
         write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
-#else
-        int coreid = cvmx_get_core_num();
-        local_irq_save(flags);
-        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-        en0 &= ~(1ull << bit);
-        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
-        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-        local_irq_restore(flags);
-#endif
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_enable_v2(unsigned int irq)
+{
+        int index = cvmx_get_core_num() * 2;
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+}
+
+/*
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_disable_v2(unsigned int irq)
+{
+        int index = cvmx_get_core_num() * 2;
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+}
+
+/*
+ * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
+{
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+        int index;
+        int cpu;
+        for_each_online_cpu(cpu) {
+                index = octeon_coreid_for_cpu(cpu) * 2;
+                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+        }
 }
 
 #ifdef CONFIG_SMP
···
 
         write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
         for_each_online_cpu(cpu) {
-                int coreid = cpu_logical_map(cpu);
+                int coreid = octeon_coreid_for_cpu(cpu);
                 uint64_t en0 =
                         cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                 if (cpumask_test_cpu(cpu, dest))
···
 
         return 0;
 }
+
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
+                                           const struct cpumask *dest)
+{
+        int cpu;
+        int index;
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+        for_each_online_cpu(cpu) {
+                index = octeon_coreid_for_cpu(cpu) * 2;
+                if (cpumask_test_cpu(cpu, dest))
+                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+                else
+                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+        }
+        return 0;
+}
 #endif
+
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu0_v2 = {
+        .name = "CIU0",
+        .enable = octeon_irq_ciu0_enable_v2,
+        .disable = octeon_irq_ciu0_disable_all_v2,
+        .ack = octeon_irq_ciu0_disable_v2,
+        .eoi = octeon_irq_ciu0_enable_v2,
+#ifdef CONFIG_SMP
+        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
+#endif
+};
 
 static struct irq_chip octeon_irq_chip_ciu0 = {
         .name = "CIU0",
···
         int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
         unsigned long flags;
         uint64_t en1;
-#ifdef CONFIG_SMP
         int cpu;
         write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
         for_each_online_cpu(cpu) {
-                int coreid = cpu_logical_map(cpu);
+                int coreid = octeon_coreid_for_cpu(cpu);
                 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                 en1 &= ~(1ull << bit);
                 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
···
          */
         cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
         write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
-#else
-        int coreid = cvmx_get_core_num();
-        local_irq_save(flags);
-        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-        en1 &= ~(1ull << bit);
-        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
-        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-        local_irq_restore(flags);
-#endif
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_enable_v2(unsigned int irq)
+{
+        int index = cvmx_get_core_num() * 2 + 1;
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+}
+
+/*
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_disable_v2(unsigned int irq)
+{
+        int index = cvmx_get_core_num() * 2 + 1;
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+}
+
+/*
+ * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
+{
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+        int index;
+        int cpu;
+        for_each_online_cpu(cpu) {
+                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+        }
 }
 
 #ifdef CONFIG_SMP
-static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
+static int octeon_irq_ciu1_set_affinity(unsigned int irq,
+                                        const struct cpumask *dest)
 {
         int cpu;
         unsigned long flags;
···
 
         write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
         for_each_online_cpu(cpu) {
-                int coreid = cpu_logical_map(cpu);
+                int coreid = octeon_coreid_for_cpu(cpu);
                 uint64_t en1 =
                         cvmx_read_csr(CVMX_CIU_INTX_EN1
                                       (coreid * 2 + 1));
···
 
         return 0;
 }
+
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
+                                           const struct cpumask *dest)
+{
+        int cpu;
+        int index;
+        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+        for_each_online_cpu(cpu) {
+                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+                if (cpumask_test_cpu(cpu, dest))
+                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+                else
+                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+        }
+        return 0;
+}
 #endif
+
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu1_v2 = {
+        .name = "CIU0",
+        .enable = octeon_irq_ciu1_enable_v2,
+        .disable = octeon_irq_ciu1_disable_all_v2,
+        .ack = octeon_irq_ciu1_disable_v2,
+        .eoi = octeon_irq_ciu1_enable_v2,
+#ifdef CONFIG_SMP
+        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
+#endif
+};
 
 static struct irq_chip octeon_irq_chip_ciu1 = {
         .name = "CIU1",
···
 void __init arch_init_irq(void)
 {
         int irq;
+        struct irq_chip *chip0;
+        struct irq_chip *chip1;
 
 #ifdef CONFIG_SMP
         /* Set the default affinity to the boot cpu. */
···
 
         if (NR_IRQS < OCTEON_IRQ_LAST)
                 pr_err("octeon_irq_init: NR_IRQS is set too low\n");
+
+        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
+            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
+            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
+                chip0 = &octeon_irq_chip_ciu0_v2;
+                chip1 = &octeon_irq_chip_ciu1_v2;
+        } else {
+                chip0 = &octeon_irq_chip_ciu0;
+                chip1 = &octeon_irq_chip_ciu1;
+        }
 
         /* 0 - 15 reserved for i8259 master and slave controller. */
 
···
 
         /* 24 - 87 CIU_INT_SUM0 */
         for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
-                set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
-                                         handle_percpu_irq);
+                set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
         }
 
         /* 88 - 151 CIU_INT_SUM1 */
         for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
-                set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
-                                         handle_percpu_irq);
+                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
         }
 
 #ifdef CONFIG_PCI_MSI
···
 #ifdef CONFIG_HOTPLUG_CPU
 static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
 {
-        unsigned int isset;
-#ifdef CONFIG_SMP
-        int coreid = cpu_logical_map(cpu);
-#else
-        int coreid = cvmx_get_core_num();
-#endif
+        unsigned int isset;
+        int coreid = octeon_coreid_for_cpu(cpu);
         int bit = (irq < OCTEON_IRQ_WDOG0) ?
-                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
+                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
         if (irq < 64) {
                 isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                         (1ull << bit)) >> bit;