Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "Loongarch: Support loongarch avec"

This reverts commit 760d7e719499d64beea62bfcf53938fb233bb6e7.

This results in build failures and has other issues according to Tianyang.

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202406240451.ygBFNyJ3-lkp@intel.com/
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Tianyang Zhang <zhangtianyang@loongson.cn>

+10 -517
-1
arch/loongarch/Kconfig
··· 83 83 select GENERIC_ENTRY 84 84 select GENERIC_GETTIMEOFDAY 85 85 select GENERIC_IOREMAP if !ARCH_IOREMAP 86 - select GENERIC_IRQ_MATRIX_ALLOCATOR 87 86 select GENERIC_IRQ_MULTI_HANDLER 88 87 select GENERIC_IRQ_PROBE 89 88 select GENERIC_IRQ_SHOW
-1
arch/loongarch/include/asm/cpu-features.h
··· 65 65 #define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID) 66 66 #define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR) 67 67 #define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW) 68 - #define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT) 69 68 70 69 #endif /* __ASM_CPU_FEATURES_H */
-2
arch/loongarch/include/asm/cpu.h
··· 99 99 #define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ 100 100 #define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */ 101 101 #define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */ 102 - #define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */ 103 102 104 103 #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) 105 104 #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) ··· 127 128 #define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID) 128 129 #define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR) 129 130 #define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW) 130 - #define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT) 131 131 132 132 #endif /* _ASM_CPU_H */
-10
arch/loongarch/include/asm/hw_irq.h
··· 10 10 extern atomic_t irq_err_count; 11 11 12 12 /* 13 - * 256 vectors Map: 14 - * 15 - * 0 - 15: mapping legacy IPs, e.g. IP0-12. 16 - * 16 - 255: mapping a vector for external IRQ. 17 - * 18 - */ 19 - #define NR_VECTORS 256 20 - #define IRQ_MATRIX_BITS NR_VECTORS 21 - #define NR_LEGACY_VECTORS 16 22 - /* 23 13 * interrupt-retrigger: NOP for now. This may not be appropriate for all 24 14 * machines, we'll see ... 25 15 */
+1 -11
arch/loongarch/include/asm/irq.h
··· 65 65 #define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15) 66 66 67 67 #define LOONGSON_CPU_IRQ_BASE 16 68 - #define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15) 68 + #define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14) 69 69 70 70 #define LOONGSON_PCH_IRQ_BASE 64 71 71 #define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47) ··· 101 101 struct acpi_madt_msi_pic *acpi_pchmsi); 102 102 int pch_pic_acpi_init(struct irq_domain *parent, 103 103 struct acpi_madt_bio_pic *acpi_pchpic); 104 - 105 - #ifdef CONFIG_ACPI 106 - int __init pch_msi_acpi_init_v2(struct irq_domain *parent, 107 - struct acpi_madt_msi_pic *pch_msi_entry); 108 - int __init loongarch_avec_acpi_init(struct irq_domain *parent); 109 - void complete_irq_moving(void); 110 - void loongarch_avec_offline_cpu(unsigned int cpu); 111 - void loongarch_avec_online_cpu(unsigned int cpu); 112 - #endif 113 - 114 104 int find_pch_pic(u32 gsi); 115 105 struct fwnode_handle *get_pch_msi_handle(int pci_segment); 116 106
+5 -15
arch/loongarch/include/asm/loongarch.h
··· 72 72 #define CPUCFG1_RPLV BIT(23) 73 73 #define CPUCFG1_HUGEPG BIT(24) 74 74 #define CPUCFG1_CRC32 BIT(25) 75 + #define CPUCFG1_MSGINT BIT(26) 75 76 76 77 #define LOONGARCH_CPUCFG2 0x2 77 78 #define CPUCFG2_FP BIT(0) ··· 252 251 #define CSR_ESTAT_EXC_WIDTH 6 253 252 #define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT) 254 253 #define CSR_ESTAT_IS_SHIFT 0 255 - #define CSR_ESTAT_IS_WIDTH 15 256 - #define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT) 254 + #define CSR_ESTAT_IS_WIDTH 14 255 + #define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT) 257 256 258 257 #define LOONGARCH_CSR_ERA 0x6 /* ERA */ 259 258 ··· 999 998 #define CSR_FWPC_SKIP_SHIFT 16 1000 999 #define CSR_FWPC_SKIP (_ULCAST_(1) << CSR_FWPC_SKIP_SHIFT) 1001 1000 1002 - #define LOONGARCH_CSR_IRR0 0xa0 1003 - #define LOONGARCH_CSR_IRR1 0xa1 1004 - #define LOONGARCH_CSR_IRR2 0xa2 1005 - #define LOONGARCH_CSR_IRR3 0xa3 1006 - #define LOONGARCH_CSR_IRR_BASE LOONGARCH_CSR_IRR0 1007 - 1008 - #define LOONGARCH_CSR_ILR 0xa4 1009 - 1010 1001 /* 1011 1002 * CSR_ECFG IM 1012 1003 */ 1013 - #define ECFG0_IM 0x00005fff 1004 + #define ECFG0_IM 0x00001fff 1014 1005 #define ECFGB_SIP0 0 1015 1006 #define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0) 1016 1007 #define ECFGB_SIP1 1 ··· 1045 1052 #define IOCSRF_EIODECODE BIT_ULL(9) 1046 1053 #define IOCSRF_FLATMODE BIT_ULL(10) 1047 1054 #define IOCSRF_VM BIT_ULL(11) 1048 - #define IOCSRF_AVEC BIT_ULL(15) 1049 1055 1050 1056 #define LOONGARCH_IOCSR_VENDOR 0x10 1051 1057 ··· 1055 1063 #define LOONGARCH_IOCSR_MISC_FUNC 0x420 1056 1064 #define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21) 1057 1065 #define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) 1058 - #define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51) 1059 1066 1060 1067 #define LOONGARCH_IOCSR_CPUTEMP 0x428 1061 1068 ··· 1375 1384 #define INT_TI 11 /* Timer */ 1376 1385 #define INT_IPI 12 1377 1386 #define INT_NMI 13 1378 - #define INT_AVEC 14 1379 1387 1380 1388 /* ExcCodes corresponding to interrupts */ 1381 - 
#define EXCCODE_INT_NUM (INT_AVEC + 1) 1389 + #define EXCCODE_INT_NUM (INT_NMI + 1) 1382 1390 #define EXCCODE_INT_START 64 1383 1391 #define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1) 1384 1392
-2
arch/loongarch/include/asm/smp.h
··· 69 69 #define ACTION_BOOT_CPU 0 70 70 #define ACTION_RESCHEDULE 1 71 71 #define ACTION_CALL_FUNCTION 2 72 - #define ACTION_CLEAR_VECT 3 73 72 #define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU) 74 73 #define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE) 75 74 #define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION) 76 - #define SMP_CLEAR_VECT BIT(ACTION_CLEAR_VECT) 77 75 78 76 struct secondary_data { 79 77 unsigned long stack;
+1 -2
arch/loongarch/kernel/cpu-probe.c
··· 106 106 elf_hwcap |= HWCAP_LOONGARCH_CRC32; 107 107 } 108 108 109 + 109 110 config = read_cpucfg(LOONGARCH_CPUCFG2); 110 111 if (config & CPUCFG2_LAM) { 111 112 c->options |= LOONGARCH_CPU_LAM; ··· 176 175 c->options |= LOONGARCH_CPU_EIODECODE; 177 176 if (config & IOCSRF_VM) 178 177 c->options |= LOONGARCH_CPU_HYPERVISOR; 179 - if (config & IOCSRF_AVEC) 180 - c->options |= LOONGARCH_CPU_AVECINT; 181 178 182 179 config = csr_read32(LOONGARCH_CSR_ASID); 183 180 config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
-5
arch/loongarch/kernel/smp.c
··· 234 234 per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++; 235 235 } 236 236 237 - if (action & SMP_CLEAR_VECT) 238 - complete_irq_moving(); 239 - 240 237 return IRQ_HANDLED; 241 238 } 242 239 ··· 388 391 irq_migrate_all_off_this_cpu(); 389 392 clear_csr_ecfg(ECFG0_IM); 390 393 local_irq_restore(flags); 391 - loongarch_avec_offline_cpu(cpu); 392 394 local_flush_tlb_all(); 393 395 394 396 return 0; ··· 566 570 * early is dangerous. 567 571 */ 568 572 WARN_ON_ONCE(!irqs_disabled()); 569 - loongarch_avec_online_cpu(cpu); 570 573 loongson_smp_finish(); 571 574 572 575 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+1 -1
drivers/irqchip/Makefile
··· 110 110 obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o 111 111 obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o 112 112 obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o 113 - obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o 113 + obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o 114 114 obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o 115 115 obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o 116 116 obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o
-419
drivers/irqchip/irq-loongarch-avec.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Copyright (C) 2020 Loongson Technologies, Inc. 4 - */ 5 - 6 - #include <linux/cpuhotplug.h> 7 - #include <linux/init.h> 8 - #include <linux/interrupt.h> 9 - #include <linux/irq.h> 10 - #include <linux/irqchip.h> 11 - #include <linux/irqchip/chained_irq.h> 12 - #include <linux/irqdomain.h> 13 - #include <linux/kernel.h> 14 - #include <linux/msi.h> 15 - #include <linux/radix-tree.h> 16 - #include <linux/spinlock.h> 17 - 18 - #include <asm/loongarch.h> 19 - #include <asm/setup.h> 20 - 21 - #define VECTORS_PER_REG 64 22 - #define ILR_INVALID_MASK 0x80000000UL 23 - #define ILR_VECTOR_MASK 0xffUL 24 - #define AVEC_MSG_OFFSET 0x100000 25 - 26 - static phys_addr_t msi_base_v2; 27 - static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map); 28 - 29 - struct pending_list { 30 - struct list_head head; 31 - }; 32 - 33 - static DEFINE_PER_CPU(struct pending_list, pending_list); 34 - 35 - struct loongarch_avec_chip { 36 - struct fwnode_handle *fwnode; 37 - struct irq_domain *domain; 38 - struct irq_matrix *vector_matrix; 39 - raw_spinlock_t lock; 40 - }; 41 - 42 - static struct loongarch_avec_chip loongarch_avec; 43 - 44 - struct loongarch_avec_data { 45 - struct list_head entry; 46 - unsigned int cpu; 47 - unsigned int vec; 48 - unsigned int prev_cpu; 49 - unsigned int prev_vec; 50 - unsigned int moving : 1, 51 - managed : 1; 52 - }; 53 - 54 - static struct cpumask intersect_mask; 55 - 56 - static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest, 57 - unsigned int *cpu) 58 - { 59 - return irq_matrix_alloc(loongarch_avec.vector_matrix, dest, false, cpu); 60 - } 61 - 62 - static inline void loongarch_avec_ack_irq(struct irq_data *d) 63 - { 64 - } 65 - 66 - static inline void loongarch_avec_unmask_irq(struct irq_data *d) 67 - { 68 - } 69 - 70 - static inline void loongarch_avec_mask_irq(struct irq_data *d) 71 - { 72 - } 73 - 74 - static void loongarch_avec_sync(struct loongarch_avec_data *adata) 
75 - { 76 - struct pending_list *plist; 77 - 78 - if (cpu_online(adata->prev_cpu)) { 79 - plist = per_cpu_ptr(&pending_list, adata->prev_cpu); 80 - list_add_tail(&adata->entry, &plist->head); 81 - adata->moving = true; 82 - loongson_send_ipi_single(adata->prev_cpu, SMP_CLEAR_VECT); 83 - } 84 - adata->prev_cpu = adata->cpu; 85 - adata->prev_vec = adata->vec; 86 - } 87 - 88 - static int loongarch_avec_set_affinity(struct irq_data *data, const struct cpumask *dest, 89 - bool force) 90 - { 91 - struct loongarch_avec_data *adata; 92 - unsigned int cpu, vector; 93 - unsigned long flags; 94 - int ret; 95 - 96 - raw_spin_lock_irqsave(&loongarch_avec.lock, flags); 97 - adata = irq_data_get_irq_chip_data(data); 98 - 99 - if (adata->vec && cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest)) { 100 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 101 - return 0; 102 - } 103 - if (adata->moving) 104 - return -EBUSY; 105 - 106 - cpumask_and(&intersect_mask, dest, cpu_online_mask); 107 - 108 - ret = assign_irq_vector(data, &intersect_mask, &cpu); 109 - if (ret < 0) { 110 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 111 - return ret; 112 - } 113 - vector = ret; 114 - adata->cpu = cpu; 115 - adata->vec = vector; 116 - per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data); 117 - loongarch_avec_sync(adata); 118 - 119 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 120 - irq_data_update_effective_affinity(data, cpumask_of(cpu)); 121 - 122 - return IRQ_SET_MASK_OK; 123 - } 124 - 125 - static void loongarch_avec_compose_msg(struct irq_data *d, 126 - struct msi_msg *msg) 127 - { 128 - struct loongarch_avec_data *avec_data; 129 - 130 - avec_data = irq_data_get_irq_chip_data(d); 131 - 132 - msg->address_hi = 0x0; 133 - msg->address_lo = msi_base_v2 | ((avec_data->vec & 0xff) << 4) | 134 - ((cpu_logical_map(avec_data->cpu & 0xffff)) << 12); 135 - msg->data = 0x0; 136 - 137 - } 138 - 139 - static struct irq_chip 
loongarch_avec_controller = { 140 - .name = "CORE_AVEC", 141 - .irq_ack = loongarch_avec_ack_irq, 142 - .irq_mask = loongarch_avec_mask_irq, 143 - .irq_unmask = loongarch_avec_unmask_irq, 144 - .irq_set_affinity = loongarch_avec_set_affinity, 145 - .irq_compose_msi_msg = loongarch_avec_compose_msg, 146 - }; 147 - 148 - void complete_irq_moving(void) 149 - { 150 - struct pending_list *plist = this_cpu_ptr(&pending_list); 151 - struct loongarch_avec_data *adata, *tmp; 152 - int cpu, vector, bias; 153 - u64 irr; 154 - 155 - raw_spin_lock(&loongarch_avec.lock); 156 - 157 - list_for_each_entry_safe(adata, tmp, &plist->head, entry) { 158 - cpu = adata->prev_cpu; 159 - vector = adata->prev_vec; 160 - bias = vector / VECTORS_PER_REG; 161 - switch (bias) { 162 - case 0: 163 - irr = csr_read64(LOONGARCH_CSR_IRR0); 164 - case 1: 165 - irr = csr_read64(LOONGARCH_CSR_IRR1); 166 - case 2: 167 - irr = csr_read64(LOONGARCH_CSR_IRR2); 168 - case 3: 169 - irr = csr_read64(LOONGARCH_CSR_IRR3); 170 - } 171 - 172 - if (irr & (1UL << (vector % VECTORS_PER_REG))) { 173 - loongson_send_ipi_single(cpu, SMP_CLEAR_VECT); 174 - continue; 175 - } 176 - list_del(&adata->entry); 177 - irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, adata->managed); 178 - this_cpu_write(irq_map[vector], NULL); 179 - adata->moving = 0; 180 - } 181 - raw_spin_unlock(&loongarch_avec.lock); 182 - } 183 - 184 - static void loongarch_avec_dispatch(struct irq_desc *desc) 185 - { 186 - struct irq_chip *chip = irq_desc_get_chip(desc); 187 - unsigned long vector; 188 - struct irq_desc *d; 189 - 190 - chained_irq_enter(chip, desc); 191 - vector = csr_read64(LOONGARCH_CSR_ILR); 192 - if (vector & ILR_INVALID_MASK) 193 - return; 194 - 195 - vector &= ILR_VECTOR_MASK; 196 - 197 - d = this_cpu_read(irq_map[vector]); 198 - if (d) { 199 - generic_handle_irq_desc(d); 200 - } else { 201 - pr_warn("IRQ ERROR:Unexpected irq occur on cpu %d[vector %ld]\n", 202 - smp_processor_id(), vector); 203 - } 204 - 205 - 
chained_irq_exit(chip, desc); 206 - } 207 - 208 - static int loongarch_avec_alloc(struct irq_domain *domain, unsigned int virq, 209 - unsigned int nr_irqs, void *arg) 210 - { 211 - struct loongarch_avec_data *adata; 212 - struct irq_data *irqd; 213 - unsigned int cpu, vector, i, ret; 214 - unsigned long flags; 215 - 216 - raw_spin_lock_irqsave(&loongarch_avec.lock, flags); 217 - for (i = 0; i < nr_irqs; i++) { 218 - irqd = irq_domain_get_irq_data(domain, virq + i); 219 - adata = kzalloc(sizeof(*adata), GFP_KERNEL); 220 - if (!adata) { 221 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 222 - return -ENOMEM; 223 - } 224 - ret = assign_irq_vector(irqd, cpu_online_mask, &cpu); 225 - if (ret < 0) { 226 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 227 - return ret; 228 - } 229 - vector = ret; 230 - adata->prev_cpu = adata->cpu = cpu; 231 - adata->prev_vec = adata->vec = vector; 232 - adata->managed = irqd_affinity_is_managed(irqd); 233 - irq_domain_set_info(domain, virq + i, virq + i, &loongarch_avec_controller, 234 - adata, handle_edge_irq, NULL, NULL); 235 - adata->moving = 0; 236 - irqd_set_single_target(irqd); 237 - irqd_set_affinity_on_activate(irqd); 238 - 239 - per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd); 240 - } 241 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 242 - 243 - return 0; 244 - } 245 - 246 - static void clear_free_vector(struct irq_data *irqd) 247 - { 248 - struct loongarch_avec_data *adata = irq_data_get_irq_chip_data(irqd); 249 - bool managed = irqd_affinity_is_managed(irqd); 250 - 251 - per_cpu(irq_map, adata->cpu)[adata->vec] = NULL; 252 - irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, managed); 253 - adata->cpu = 0; 254 - adata->vec = 0; 255 - if (!adata->moving) 256 - return; 257 - 258 - per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = 0; 259 - irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, 260 - adata->prev_vec, adata->managed); 261 - 
adata->prev_vec = 0; 262 - adata->prev_cpu = 0; 263 - adata->moving = 0; 264 - list_del_init(&adata->entry); 265 - } 266 - 267 - static void loongarch_avec_free(struct irq_domain *domain, unsigned int virq, 268 - unsigned int nr_irqs) 269 - { 270 - struct irq_data *d; 271 - unsigned long flags; 272 - unsigned int i; 273 - 274 - raw_spin_lock_irqsave(&loongarch_avec.lock, flags); 275 - for (i = 0; i < nr_irqs; i++) { 276 - d = irq_domain_get_irq_data(domain, virq + i); 277 - if (d) { 278 - clear_free_vector(d); 279 - irq_domain_reset_irq_data(d); 280 - 281 - } 282 - } 283 - 284 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 285 - } 286 - 287 - static const struct irq_domain_ops loongarch_avec_domain_ops = { 288 - .alloc = loongarch_avec_alloc, 289 - .free = loongarch_avec_free, 290 - }; 291 - 292 - static int __init irq_matrix_init(void) 293 - { 294 - int i; 295 - 296 - loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS - 1); 297 - if (!loongarch_avec.vector_matrix) 298 - return -ENOMEM; 299 - for (i = 0; i < NR_LEGACY_VECTORS; i++) 300 - irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false); 301 - 302 - irq_matrix_online(loongarch_avec.vector_matrix); 303 - 304 - return 0; 305 - } 306 - 307 - static int __init loongarch_avec_init(struct irq_domain *parent) 308 - { 309 - struct pending_list *plist = per_cpu_ptr(&pending_list, 0); 310 - int ret = 0, parent_irq; 311 - unsigned long tmp; 312 - 313 - raw_spin_lock_init(&loongarch_avec.lock); 314 - 315 - loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("CORE_AVEC"); 316 - if (!loongarch_avec.fwnode) { 317 - pr_err("Unable to allocate domain handle\n"); 318 - ret = -ENOMEM; 319 - goto out; 320 - } 321 - 322 - loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode, 323 - &loongarch_avec_domain_ops, NULL); 324 - if (!loongarch_avec.domain) { 325 - pr_err("core-vec: cannot create IRQ domain\n"); 326 - ret = -ENOMEM; 327 - goto out_free_handle; 328 - } 329 - 330 
- parent_irq = irq_create_mapping(parent, INT_AVEC); 331 - if (!parent_irq) { 332 - pr_err("Failed to mapping hwirq\n"); 333 - ret = -EINVAL; 334 - goto out_remove_domain; 335 - } 336 - irq_set_chained_handler_and_data(parent_irq, loongarch_avec_dispatch, NULL); 337 - 338 - ret = irq_matrix_init(); 339 - if (ret) { 340 - pr_err("Failed to init irq matrix\n"); 341 - goto out_free_matrix; 342 - } 343 - 344 - INIT_LIST_HEAD(&plist->head); 345 - tmp = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); 346 - tmp |= IOCSR_MISC_FUNC_AVEC_EN; 347 - iocsr_write64(tmp, LOONGARCH_IOCSR_MISC_FUNC); 348 - 349 - return ret; 350 - 351 - out_free_matrix: 352 - kfree(loongarch_avec.vector_matrix); 353 - out_remove_domain: 354 - irq_domain_remove(loongarch_avec.domain); 355 - out_free_handle: 356 - irq_domain_free_fwnode(loongarch_avec.fwnode); 357 - out: 358 - return ret; 359 - } 360 - 361 - void loongarch_avec_offline_cpu(unsigned int cpu) 362 - { 363 - struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); 364 - unsigned long flags; 365 - 366 - raw_spin_lock_irqsave(&loongarch_avec.lock, flags); 367 - if (list_empty(&plist->head)) 368 - irq_matrix_offline(loongarch_avec.vector_matrix); 369 - else 370 - pr_warn("cpu %d advanced extioi is busy\n", cpu); 371 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 372 - } 373 - 374 - void loongarch_avec_online_cpu(unsigned int cpu) 375 - { 376 - struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); 377 - unsigned long flags; 378 - 379 - raw_spin_lock_irqsave(&loongarch_avec.lock, flags); 380 - 381 - irq_matrix_online(loongarch_avec.vector_matrix); 382 - 383 - INIT_LIST_HEAD(&plist->head); 384 - 385 - raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); 386 - } 387 - 388 - static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, 389 - const unsigned long end) 390 - { 391 - struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; 392 - 393 - msi_base_v2 = pchmsi_entry->msg_address - 
AVEC_MSG_OFFSET; 394 - return pch_msi_acpi_init_v2(loongarch_avec.domain, pchmsi_entry); 395 - } 396 - 397 - static inline int __init acpi_cascade_irqdomain_init(void) 398 - { 399 - return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); 400 - } 401 - 402 - int __init loongarch_avec_acpi_init(struct irq_domain *parent) 403 - { 404 - int ret = 0; 405 - 406 - ret = loongarch_avec_init(parent); 407 - if (ret) { 408 - pr_err("Failed to init irq domain\n"); 409 - return ret; 410 - } 411 - 412 - ret = acpi_cascade_irqdomain_init(); 413 - if (ret) { 414 - pr_err("Failed to cascade IRQ domain\n"); 415 - return ret; 416 - } 417 - 418 - return ret; 419 - }
+1 -3
drivers/irqchip/irq-loongarch-cpu.c
··· 138 138 if (r < 0) 139 139 return r; 140 140 141 - if (cpu_has_avecint) 142 - r = loongarch_avec_acpi_init(irq_domain); 143 - return r; 141 + return 0; 144 142 } 145 143 146 144 static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
-3
drivers/irqchip/irq-loongson-eiointc.c
··· 359 359 if (r < 0) 360 360 return r; 361 361 362 - if (cpu_has_avecint) 363 - return 0; 364 - 365 362 r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); 366 363 if (r < 0) 367 364 return r;
+1 -42
drivers/irqchip/irq-loongson-pch-msi.c
··· 16 16 #include <linux/slab.h> 17 17 18 18 static int nr_pics; 19 + 19 20 struct pch_msi_data { 20 21 struct mutex msi_map_lock; 21 22 phys_addr_t doorbell; ··· 98 97 .irq_ack = irq_chip_ack_parent, 99 98 .irq_set_affinity = irq_chip_set_affinity_parent, 100 99 .irq_compose_msi_msg = pch_msi_compose_msi_msg, 101 - }; 102 - 103 - static struct irq_chip pch_msi_irq_chip_v2 = { 104 - .name = "MSI", 105 - .irq_ack = irq_chip_ack_parent, 106 - }; 107 - 108 - static struct msi_domain_info pch_msi_domain_info_v2 = { 109 - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 110 - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, 111 - .chip = &pch_msi_irq_chip_v2, 112 100 }; 113 101 114 102 static int pch_msi_parent_domain_alloc(struct irq_domain *domain, ··· 268 278 { 269 279 int i; 270 280 271 - if (cpu_has_avecint) 272 - return pch_msi_handle[0]; 273 - 274 281 for (i = 0; i < MAX_IO_PICS; i++) { 275 282 if (msi_group[i].pci_segment == pci_segment) 276 283 return pch_msi_handle[i]; ··· 288 301 irq_domain_free_fwnode(domain_handle); 289 302 290 303 return ret; 291 - } 292 - 293 - int __init pch_msi_acpi_init_v2(struct irq_domain *parent, 294 - struct acpi_madt_msi_pic *msi_entry) 295 - { 296 - struct irq_domain *msi_domain; 297 - 298 - if (pch_msi_handle[0]) 299 - return 0; 300 - 301 - pch_msi_handle[0] = irq_domain_alloc_named_fwnode("msipic-v2"); 302 - if (!pch_msi_handle[0]) { 303 - pr_err("Unable to allocate domain handle\n"); 304 - kfree(pch_msi_handle[0]); 305 - return -ENOMEM; 306 - } 307 - 308 - msi_domain = pci_msi_create_irq_domain(pch_msi_handle[0], 309 - &pch_msi_domain_info_v2, 310 - parent); 311 - if (!msi_domain) { 312 - pr_err("Failed to create PCI MSI domain\n"); 313 - kfree(pch_msi_handle[0]); 314 - return -ENOMEM; 315 - } 316 - 317 - pr_info("IRQ domain MSIPIC-V2 init done.\n"); 318 - return 0; 319 304 } 320 305 #endif