Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sparseirq: Use radix_tree instead of ptrs array

Use radix_tree irq_desc_tree instead of irq_desc_ptrs.

-v2: Per feedback from Eric and Cyrill, use radix_tree_lookup_slot and
radix_tree_replace_slot.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1265793639-15071-32-git-send-email-yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

Authored by Yinghai Lu; committed by H. Peter Anvin.
b5eb78f7 99558f0b

+25 -24
kernel/irq/handle.c
··· 19 19 #include <linux/kernel_stat.h> 20 20 #include <linux/rculist.h> 21 21 #include <linux/hash.h> 22 + #include <linux/radix-tree.h> 22 23 #include <trace/events/irq.h> 23 24 24 25 #include "internals.h" ··· 128 127 */ 129 128 DEFINE_RAW_SPINLOCK(sparse_irq_lock); 130 129 131 - static struct irq_desc **irq_desc_ptrs __read_mostly; 130 + static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); 131 + 132 + static void set_irq_desc(unsigned int irq, struct irq_desc *desc) 133 + { 134 + radix_tree_insert(&irq_desc_tree, irq, desc); 135 + } 136 + 137 + struct irq_desc *irq_to_desc(unsigned int irq) 138 + { 139 + return radix_tree_lookup(&irq_desc_tree, irq); 140 + } 141 + 142 + void replace_irq_desc(unsigned int irq, struct irq_desc *desc) 143 + { 144 + void **ptr; 145 + 146 + ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); 147 + if (ptr) 148 + radix_tree_replace_slot(ptr, desc); 149 + } 132 150 133 151 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { 134 152 [0 ... 
NR_IRQS_LEGACY-1] = { ··· 179 159 legacy_count = ARRAY_SIZE(irq_desc_legacy); 180 160 node = first_online_node; 181 161 182 - /* allocate irq_desc_ptrs array based on nr_irqs */ 183 - irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT); 184 - 185 162 /* allocate based on nr_cpu_ids */ 186 163 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * 187 164 sizeof(int), GFP_NOWAIT, node); ··· 192 175 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 193 176 alloc_desc_masks(&desc[i], node, true); 194 177 init_desc_masks(&desc[i]); 195 - irq_desc_ptrs[i] = desc + i; 178 + set_irq_desc(i, &desc[i]); 196 179 } 197 180 198 - for (i = legacy_count; i < nr_irqs; i++) 199 - irq_desc_ptrs[i] = NULL; 200 - 201 181 return arch_early_irq_init(); 202 - } 203 - 204 - struct irq_desc *irq_to_desc(unsigned int irq) 205 - { 206 - if (irq_desc_ptrs && irq < nr_irqs) 207 - return irq_desc_ptrs[irq]; 208 - 209 - return NULL; 210 - } 211 - 212 - void replace_irq_desc(unsigned int irq, struct irq_desc *desc) 213 - { 214 - irq_desc_ptrs[irq] = desc; 215 182 } 216 183 217 184 struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) ··· 209 208 return NULL; 210 209 } 211 210 212 - desc = irq_desc_ptrs[irq]; 211 + desc = irq_to_desc(irq); 213 212 if (desc) 214 213 return desc; 215 214 216 215 raw_spin_lock_irqsave(&sparse_irq_lock, flags); 217 216 218 217 /* We have to check it to avoid races with another CPU */ 219 - desc = irq_desc_ptrs[irq]; 218 + desc = irq_to_desc(irq); 220 219 if (desc) 221 220 goto out_unlock; 222 221 ··· 229 228 } 230 229 init_one_irq_desc(irq, desc, node); 231 230 232 - irq_desc_ptrs[irq] = desc; 231 + set_irq_desc(irq, desc); 233 232 234 233 out_unlock: 235 234 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);