Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

irqchip/gic: Make locking a BL_SWITCHER only feature

The BL switcher code manipulates the logical/physical CPU mapping,
forcing a lock to be taken on the IPI path. With an IPI heavy load,
this single lock becomes contended.

But when CONFIG_BL_SWITCHER is not enabled, there is no reason
to take this lock at all since the CPU mapping is immutable.

This patch allows the lock to be entirely removed when BL_SWITCHER
is not enabled (which is the case in most configurations), leading
to a small improvement of "perf bench sched pipe" (measured on
an 8 core AMD Seattle system):

Before: 101370 ops/sec
After: 103680 ops/sec

Take this opportunity to remove a useless lock being taken when
handling an interrupt on a secondary GIC.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>

+27 -9
drivers/irqchip/irq-gic.c
··· 91 91 #endif 92 92 }; 93 93 94 - static DEFINE_RAW_SPINLOCK(irq_controller_lock); 94 + #ifdef CONFIG_BL_SWITCHER 95 + 96 + static DEFINE_RAW_SPINLOCK(cpu_map_lock); 97 + 98 + #define gic_lock_irqsave(f) \ 99 + raw_spin_lock_irqsave(&cpu_map_lock, (f)) 100 + #define gic_unlock_irqrestore(f) \ 101 + raw_spin_unlock_irqrestore(&cpu_map_lock, (f)) 102 + 103 + #define gic_lock() raw_spin_lock(&cpu_map_lock) 104 + #define gic_unlock() raw_spin_unlock(&cpu_map_lock) 105 + 106 + #else 107 + 108 + #define gic_lock_irqsave(f) do { (void)(f); } while(0) 109 + #define gic_unlock_irqrestore(f) do { (void)(f); } while(0) 110 + 111 + #define gic_lock() do { } while(0) 112 + #define gic_unlock() do { } while(0) 113 + 114 + #endif 95 115 96 116 /* 97 117 * The GIC mapping of CPU interfaces does not necessarily match ··· 337 317 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 338 318 return -EINVAL; 339 319 340 - raw_spin_lock_irqsave(&irq_controller_lock, flags); 320 + gic_lock_irqsave(flags); 341 321 mask = 0xff << shift; 342 322 bit = gic_cpu_map[cpu] << shift; 343 323 val = readl_relaxed(reg) & ~mask; 344 324 writel_relaxed(val | bit, reg); 345 - raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 325 + gic_unlock_irqrestore(flags); 346 326 347 327 return IRQ_SET_MASK_OK_DONE; 348 328 } ··· 394 374 395 375 chained_irq_enter(chip, desc); 396 376 397 - raw_spin_lock(&irq_controller_lock); 398 377 status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); 399 - raw_spin_unlock(&irq_controller_lock); 400 378 401 379 gic_irq = (status & GICC_IAR_INT_ID_MASK); 402 380 if (gic_irq == GICC_INT_SPURIOUS) ··· 794 776 return; 795 777 } 796 778 797 - raw_spin_lock_irqsave(&irq_controller_lock, flags); 779 + gic_lock_irqsave(flags); 798 780 799 781 /* Convert our logical CPU mask into a physical one. 
*/ 800 782 for_each_cpu(cpu, mask) ··· 809 791 /* this always happens on GIC0 */ 810 792 writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); 811 793 812 - raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 794 + gic_unlock_irqrestore(flags); 813 795 } 814 796 #endif 815 797 ··· 877 859 cur_target_mask = 0x01010101 << cur_cpu_id; 878 860 ror_val = (cur_cpu_id - new_cpu_id) & 31; 879 861 880 - raw_spin_lock(&irq_controller_lock); 862 + gic_lock(); 881 863 882 864 /* Update the target interface for this logical CPU */ 883 865 gic_cpu_map[cpu] = 1 << new_cpu_id; ··· 897 879 } 898 880 } 899 881 900 - raw_spin_unlock(&irq_controller_lock); 882 + gic_unlock(); 901 883 902 884 /* 903 885 * Now let's migrate and clear any potential SGIs that might be