Merge tag 'for-5.18/parisc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull more parisc architecture updates from Helge Deller:

- Revert a patch to the invalidate/flush vmap routines that broke the
   kernel patching functions on older PA-RISC machines.

- Fix locking and flushing in the kernel patching code; it now works on
   the B160L machine as well.

- Fix CPU IRQ affinity for LASI, WAX and Dino chips

- Add CPU hotplug support

- Detect the hppa-suse-linux-gcc compiler when cross-compiling

* tag 'for-5.18/parisc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: Fix patch code locking and flushing
parisc: Find a new timesync master if current CPU is removed
parisc: Move common_stext into .text section when CONFIG_HOTPLUG_CPU=y
parisc: Rewrite arch_cpu_idle_dead() for CPU hotplugging
parisc: Implement __cpu_die() and __cpu_disable() for CPU hotplugging
parisc: Add PDC locking functions for rendezvous code
parisc: Move disable_sr_hashing_asm() into .text section
parisc: Move CPU startup-related functions into .text section
parisc: Move store_cpu_topology() into text section
parisc: Switch from GENERIC_CPU_DEVICES to GENERIC_ARCH_TOPOLOGY
parisc: Ensure set_firmware_width() is called only once
parisc: Add constants for control registers and clean up mfctl()
parisc: Detect hppa-suse-linux-gcc compiler for cross-building
parisc: Clean up cpu_check_affinity() and drop cpu_set_affinity_irq()
parisc: Fix CPU affinity for Lasi, WAX and Dino chips
Revert "parisc: Fix invalidate/flush vmap routines"

+312 -206
+3 -9
arch/parisc/Kconfig
··· 37 select GENERIC_PCI_IOMAP 38 select ARCH_HAVE_NMI_SAFE_CMPXCHG 39 select GENERIC_SMP_IDLE_THREAD 40 - select GENERIC_CPU_DEVICES 41 select GENERIC_LIB_DEVMEM_IS_ALLOWED 42 select SYSCTL_ARCH_UNALIGN_ALLOW 43 select SYSCTL_EXCEPTION_TRACE ··· 56 select HAVE_ARCH_TRACEHOOK 57 select HAVE_REGS_AND_STACK_ACCESS_API 58 select GENERIC_SCHED_CLOCK 59 select HAVE_UNSTABLE_SCHED_CLOCK if SMP 60 select LEGACY_TIMER_TICK 61 select CPU_NO_EFFICIENT_FFS ··· 280 281 If you don't know what to do here, say N. 282 283 - config PARISC_CPU_TOPOLOGY 284 - bool "Support cpu topology definition" 285 - depends on SMP 286 - default y 287 - help 288 - Support PARISC cpu topology definition. 289 - 290 config SCHED_MC 291 bool "Multi-core scheduler support" 292 - depends on PARISC_CPU_TOPOLOGY && PA8X00 293 help 294 Multi-core scheduler support improves the CPU scheduler's decision 295 making when dealing with multi-core CPU chips at a cost of slightly
··· 37 select GENERIC_PCI_IOMAP 38 select ARCH_HAVE_NMI_SAFE_CMPXCHG 39 select GENERIC_SMP_IDLE_THREAD 40 + select GENERIC_ARCH_TOPOLOGY if SMP 41 select GENERIC_LIB_DEVMEM_IS_ALLOWED 42 select SYSCTL_ARCH_UNALIGN_ALLOW 43 select SYSCTL_EXCEPTION_TRACE ··· 56 select HAVE_ARCH_TRACEHOOK 57 select HAVE_REGS_AND_STACK_ACCESS_API 58 select GENERIC_SCHED_CLOCK 59 + select GENERIC_IRQ_MIGRATION if SMP 60 select HAVE_UNSTABLE_SCHED_CLOCK if SMP 61 select LEGACY_TIMER_TICK 62 select CPU_NO_EFFICIENT_FFS ··· 279 280 If you don't know what to do here, say N. 281 282 config SCHED_MC 283 bool "Multi-core scheduler support" 284 + depends on GENERIC_ARCH_TOPOLOGY && PA8X00 285 help 286 Multi-core scheduler support improves the CPU scheduler's decision 287 making when dealing with multi-core CPU chips at a cost of slightly
+2 -2
arch/parisc/Makefile
··· 42 43 # Set default 32 bits cross compilers for vdso 44 CC_ARCHES_32 = hppa hppa2.0 hppa1.1 45 - CC_SUFFIXES = linux linux-gnu unknown-linux-gnu 46 CROSS32_COMPILE := $(call cc-cross-prefix, \ 47 $(foreach a,$(CC_ARCHES_32), \ 48 $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) ··· 52 # Set default cross compiler for kernel build 53 ifdef cross_compiling 54 ifeq ($(CROSS_COMPILE),) 55 - CC_SUFFIXES = linux linux-gnu unknown-linux-gnu 56 CROSS_COMPILE := $(call cc-cross-prefix, \ 57 $(foreach a,$(CC_ARCHES), \ 58 $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
··· 42 43 # Set default 32 bits cross compilers for vdso 44 CC_ARCHES_32 = hppa hppa2.0 hppa1.1 45 + CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux 46 CROSS32_COMPILE := $(call cc-cross-prefix, \ 47 $(foreach a,$(CC_ARCHES_32), \ 48 $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) ··· 52 # Set default cross compiler for kernel build 53 ifdef cross_compiling 54 ifeq ($(CROSS_COMPILE),) 55 + CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux 56 CROSS_COMPILE := $(call cc-cross-prefix, \ 57 $(foreach a,$(CC_ARCHES), \ 58 $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
+3
arch/parisc/include/asm/pdc.h
··· 94 unsigned long glob_cfg); 95 96 int __pdc_cpu_rendezvous(void); 97 static inline char * os_id_to_string(u16 os_id) { 98 switch(os_id) { 99 case OS_ID_NONE: return "No OS";
··· 94 unsigned long glob_cfg); 95 96 int __pdc_cpu_rendezvous(void); 97 + void pdc_cpu_rendezvous_lock(void); 98 + void pdc_cpu_rendezvous_unlock(void); 99 + 100 static inline char * os_id_to_string(u16 os_id) { 101 switch(os_id) { 102 case OS_ID_NONE: return "No OS";
+2 -1
arch/parisc/include/asm/pdcpat.h
··· 83 #define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */ 84 #define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */ 85 #define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */ 86 #define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */ 87 #define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache 88 * Cleansing Mode */ ··· 357 358 typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t; 359 360 - 361 extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data); 362 extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info); 363 extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
··· 83 #define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */ 84 #define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */ 85 #define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */ 86 + #define PDC_PAT_CPU_GET_PDC_ENTRYPOINT 11L /* Return PDC Entry point */ 87 #define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */ 88 #define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache 89 * Cleansing Mode */ ··· 356 357 typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t; 358 359 + extern int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry); 360 extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data); 361 extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info); 362 extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
+1
arch/parisc/include/asm/processor.h
··· 95 96 extern struct system_cpuinfo_parisc boot_cpu_data; 97 DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data); 98 99 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) 100
··· 95 96 extern struct system_cpuinfo_parisc boot_cpu_data; 97 DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data); 98 + extern int time_keeper_id; /* CPU used for timekeeping */ 99 100 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF) 101
+2 -7
arch/parisc/include/asm/smp.h
··· 44 45 #define NO_PROC_ID 0xFF /* No processor magic marker */ 46 #define ANY_PROC_ID 0xFF /* Any processor magic marker */ 47 - static inline int __cpu_disable (void) { 48 - return 0; 49 - } 50 - static inline void __cpu_die (unsigned int cpu) { 51 - while(1) 52 - ; 53 - } 54 55 #endif /* __ASM_SMP_H */
··· 44 45 #define NO_PROC_ID 0xFF /* No processor magic marker */ 46 #define ANY_PROC_ID 0xFF /* Any processor magic marker */ 47 + int __cpu_disable(void); 48 + void __cpu_die(unsigned int cpu); 49 50 #endif /* __ASM_SMP_H */
+8 -9
arch/parisc/include/asm/special_insns.h
··· 30 pa; \ 31 }) 32 33 #define mfctl(reg) ({ \ 34 unsigned long cr; \ 35 __asm__ __volatile__( \ 36 - "mfctl " #reg ",%0" : \ 37 - "=r" (cr) \ 38 ); \ 39 cr; \ 40 }) ··· 48 : /* no outputs */ \ 49 : "r" (gr), "i" (cr) : "memory") 50 51 - /* these are here to de-mystefy the calling code, and to provide hooks */ 52 - /* which I needed for debugging EIEM problems -PB */ 53 - #define get_eiem() mfctl(15) 54 - static inline void set_eiem(unsigned long val) 55 - { 56 - mtctl(val, 15); 57 - } 58 59 #define mfsp(reg) ({ \ 60 unsigned long cr; \
··· 30 pa; \ 31 }) 32 33 + #define CR_EIEM 15 /* External Interrupt Enable Mask */ 34 + #define CR_CR16 16 /* CR16 Interval Timer */ 35 + #define CR_EIRR 23 /* External Interrupt Request Register */ 36 + 37 #define mfctl(reg) ({ \ 38 unsigned long cr; \ 39 __asm__ __volatile__( \ 40 + "mfctl %1,%0" : \ 41 + "=r" (cr) : "i" (reg) \ 42 ); \ 43 cr; \ 44 }) ··· 44 : /* no outputs */ \ 45 : "r" (gr), "i" (cr) : "memory") 46 47 + #define get_eiem() mfctl(CR_EIEM) 48 + #define set_eiem(val) mtctl(val, CR_EIEM) 49 50 #define mfsp(reg) ({ \ 51 unsigned long cr; \
+3 -20
arch/parisc/include/asm/topology.h
··· 1 #ifndef _ASM_PARISC_TOPOLOGY_H 2 #define _ASM_PARISC_TOPOLOGY_H 3 4 - #ifdef CONFIG_PARISC_CPU_TOPOLOGY 5 6 #include <linux/cpumask.h> 7 - 8 - struct cputopo_parisc { 9 - int thread_id; 10 - int core_id; 11 - int socket_id; 12 - cpumask_t thread_sibling; 13 - cpumask_t core_sibling; 14 - }; 15 - 16 - extern struct cputopo_parisc cpu_topology[NR_CPUS]; 17 - 18 - #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) 19 - #define topology_core_id(cpu) (cpu_topology[cpu].core_id) 20 - #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) 21 - #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) 22 - 23 - void init_cpu_topology(void); 24 - void store_cpu_topology(unsigned int cpuid); 25 - const struct cpumask *cpu_coregroup_mask(int cpu); 26 27 #else 28 29 static inline void init_cpu_topology(void) { } 30 static inline void store_cpu_topology(unsigned int cpuid) { } 31 32 #endif 33
··· 1 #ifndef _ASM_PARISC_TOPOLOGY_H 2 #define _ASM_PARISC_TOPOLOGY_H 3 4 + #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY 5 6 #include <linux/cpumask.h> 7 + #include <linux/arch_topology.h> 8 9 #else 10 11 static inline void init_cpu_topology(void) { } 12 static inline void store_cpu_topology(unsigned int cpuid) { } 13 + static inline void reset_cpu_topology(void) { } 14 15 #endif 16
+1 -1
arch/parisc/kernel/Makefile
··· 31 obj64-$(CONFIG_AUDIT) += compat_audit.o 32 # only supported for PCX-W/U in 64-bit mode at the moment 33 obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y) 34 - obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o 35 obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o 36 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 37 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
··· 31 obj64-$(CONFIG_AUDIT) += compat_audit.o 32 # only supported for PCX-W/U in 64-bit mode at the moment 33 obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y) 34 + obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += topology.o 35 obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o 36 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 37 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+6 -20
arch/parisc/kernel/cache.c
··· 273 } 274 } 275 276 - void __init disable_sr_hashing(void) 277 { 278 int srhash_type, retval; 279 unsigned long space_bits; ··· 611 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) 612 { 613 if (pfn_valid(pfn)) { 614 - flush_tlb_page(vma, vmaddr); 615 if (likely(vma->vm_mm->context.space_id)) { 616 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 617 } else { 618 __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); ··· 624 { 625 unsigned long start = (unsigned long)vaddr; 626 unsigned long end = start + size; 627 - unsigned long flags, physaddr; 628 629 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 630 (unsigned long)size >= parisc_cache_flush_threshold) { ··· 632 return; 633 } 634 635 - while (start < end) { 636 - physaddr = lpa(start); 637 - purge_tlb_start(flags); 638 - pdtlb(SR_KERNEL, start); 639 - purge_tlb_end(flags); 640 - flush_dcache_page_asm(physaddr, start); 641 - start += PAGE_SIZE; 642 - } 643 } 644 EXPORT_SYMBOL(flush_kernel_vmap_range); 645 ··· 641 { 642 unsigned long start = (unsigned long)vaddr; 643 unsigned long end = start + size; 644 - unsigned long flags, physaddr; 645 646 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 647 (unsigned long)size >= parisc_cache_flush_threshold) { ··· 649 return; 650 } 651 652 - while (start < end) { 653 - physaddr = lpa(start); 654 - purge_tlb_start(flags); 655 - pdtlb(SR_KERNEL, start); 656 - purge_tlb_end(flags); 657 - purge_dcache_page_asm(physaddr, start); 658 - start += PAGE_SIZE; 659 - } 660 } 661 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
··· 273 } 274 } 275 276 + void disable_sr_hashing(void) 277 { 278 int srhash_type, retval; 279 unsigned long space_bits; ··· 611 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) 612 { 613 if (pfn_valid(pfn)) { 614 if (likely(vma->vm_mm->context.space_id)) { 615 + flush_tlb_page(vma, vmaddr); 616 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 617 } else { 618 __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); ··· 624 { 625 unsigned long start = (unsigned long)vaddr; 626 unsigned long end = start + size; 627 628 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 629 (unsigned long)size >= parisc_cache_flush_threshold) { ··· 633 return; 634 } 635 636 + flush_kernel_dcache_range_asm(start, end); 637 + flush_tlb_kernel_range(start, end); 638 } 639 EXPORT_SYMBOL(flush_kernel_vmap_range); 640 ··· 648 { 649 unsigned long start = (unsigned long)vaddr; 650 unsigned long end = start + size; 651 652 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 653 (unsigned long)size >= parisc_cache_flush_threshold) { ··· 657 return; 658 } 659 660 + purge_kernel_dcache_range_asm(start, end); 661 + flush_tlb_kernel_range(start, end); 662 } 663 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+43 -1
arch/parisc/kernel/firmware.c
··· 83 84 /* Firmware needs to be initially set to narrow to determine the 85 * actual firmware width. */ 86 - int parisc_narrow_firmware __ro_after_init = 1; 87 #endif 88 89 /* On most currently-supported platforms, IODC I/O calls are 32-bit calls ··· 174 void set_firmware_width(void) 175 { 176 unsigned long flags; 177 spin_lock_irqsave(&pdc_lock, flags); 178 set_firmware_width_unlocked(); 179 spin_unlock_irqrestore(&pdc_lock, flags); ··· 329 return mem_pdc_call(PDC_PROC, 1, 0); 330 } 331 332 333 /** 334 * pdc_chassis_warn - Fetches chassis warnings 335 * @retval: -1 on error, 0 on success
··· 83 84 /* Firmware needs to be initially set to narrow to determine the 85 * actual firmware width. */ 86 + int parisc_narrow_firmware __ro_after_init = 2; 87 #endif 88 89 /* On most currently-supported platforms, IODC I/O calls are 32-bit calls ··· 174 void set_firmware_width(void) 175 { 176 unsigned long flags; 177 + 178 + /* already initialized? */ 179 + if (parisc_narrow_firmware != 2) 180 + return; 181 + 182 spin_lock_irqsave(&pdc_lock, flags); 183 set_firmware_width_unlocked(); 184 spin_unlock_irqrestore(&pdc_lock, flags); ··· 324 return mem_pdc_call(PDC_PROC, 1, 0); 325 } 326 327 + /** 328 + * pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state 329 + */ 330 + void pdc_cpu_rendezvous_lock(void) 331 + { 332 + spin_lock(&pdc_lock); 333 + } 334 335 + /** 336 + * pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state 337 + */ 338 + void pdc_cpu_rendezvous_unlock(void) 339 + { 340 + spin_unlock(&pdc_lock); 341 + } 342 + 343 + /** 344 + * pdc_pat_get_PDC_entrypoint - Get PDC entry point for current CPU 345 + * @retval: -1 on error, 0 on success 346 + */ 347 + int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry) 348 + { 349 + int retval = 0; 350 + unsigned long flags; 351 + 352 + if (!IS_ENABLED(CONFIG_SMP) || !is_pdc_pat()) { 353 + *pdc_entry = MEM_PDC; 354 + return 0; 355 + } 356 + 357 + spin_lock_irqsave(&pdc_lock, flags); 358 + retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_PDC_ENTRYPOINT, 359 + __pa(pdc_result)); 360 + *pdc_entry = pdc_result[0]; 361 + spin_unlock_irqrestore(&pdc_lock, flags); 362 + 363 + return retval; 364 + } 365 /** 366 * pdc_chassis_warn - Fetches chassis warnings 367 * @retval: -1 on error, 0 on success
+9 -2
arch/parisc/kernel/head.S
··· 162 /* FALLTHROUGH */ 163 .procend 164 165 /* 166 ** Code Common to both Monarch and Slave processors. 167 ** Entry: ··· 379 380 .procend 381 #endif /* CONFIG_SMP */ 382 - 383 - ENDPROC(parisc_kernel_start) 384 385 #ifndef CONFIG_64BIT 386 .section .data..ro_after_init
··· 162 /* FALLTHROUGH */ 163 .procend 164 165 + #ifdef CONFIG_HOTPLUG_CPU 166 + /* common_stext is far away in another section... jump there */ 167 + load32 PA(common_stext), %rp 168 + bv,n (%rp) 169 + 170 + /* common_stext and smp_slave_stext needs to be in text section */ 171 + .text 172 + #endif 173 + 174 /* 175 ** Code Common to both Monarch and Slave processors. 176 ** Entry: ··· 370 371 .procend 372 #endif /* CONFIG_SMP */ 373 374 #ifndef CONFIG_64BIT 375 .section .data..ro_after_init
+3 -22
arch/parisc/kernel/irq.c
··· 105 if (irqd_is_per_cpu(d)) 106 return -EINVAL; 107 108 - /* whatever mask they set, we just allow one CPU */ 109 - cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1), 110 - dest, cpu_online_mask); 111 if (cpu_dest >= nr_cpu_ids) 112 - cpu_dest = cpumask_first_and(dest, cpu_online_mask); 113 114 return cpu_dest; 115 - } 116 - 117 - static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, 118 - bool force) 119 - { 120 - int cpu_dest; 121 - 122 - cpu_dest = cpu_check_affinity(d, dest); 123 - if (cpu_dest < 0) 124 - return -1; 125 - 126 - cpumask_copy(irq_data_get_affinity_mask(d), dest); 127 - 128 - return 0; 129 } 130 #endif 131 ··· 119 .irq_unmask = cpu_unmask_irq, 120 .irq_ack = cpu_ack_irq, 121 .irq_eoi = cpu_eoi_irq, 122 - #ifdef CONFIG_SMP 123 - .irq_set_affinity = cpu_set_affinity_irq, 124 - #endif 125 /* XXX: Needs to be written. We managed without it so far, but 126 * we really ought to write it. 127 */ ··· 563 #endif 564 } 565 566 - void __init init_IRQ(void) 567 { 568 local_irq_disable(); /* PARANOID - should already be disabled */ 569 mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
··· 105 if (irqd_is_per_cpu(d)) 106 return -EINVAL; 107 108 + cpu_dest = cpumask_first_and(dest, cpu_online_mask); 109 if (cpu_dest >= nr_cpu_ids) 110 + cpu_dest = cpumask_first(cpu_online_mask); 111 112 return cpu_dest; 113 } 114 #endif 115 ··· 135 .irq_unmask = cpu_unmask_irq, 136 .irq_ack = cpu_ack_irq, 137 .irq_eoi = cpu_eoi_irq, 138 /* XXX: Needs to be written. We managed without it so far, but 139 * we really ought to write it. 140 */ ··· 582 #endif 583 } 584 585 + void init_IRQ(void) 586 { 587 local_irq_disable(); /* PARANOID - should already be disabled */ 588 mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
+1 -1
arch/parisc/kernel/pacache.S
··· 1264 nop 1265 ENDPROC_CFI(flush_kernel_icache_range_asm) 1266 1267 - __INIT 1268 1269 /* align should cover use of rfi in disable_sr_hashing_asm and 1270 * srdis_done.
··· 1264 nop 1265 ENDPROC_CFI(flush_kernel_icache_range_asm) 1266 1267 + .text 1268 1269 /* align should cover use of rfi in disable_sr_hashing_asm and 1270 * srdis_done.
+11 -14
arch/parisc/kernel/patch.c
··· 40 41 *need_unmap = 1; 42 set_fixmap(fixmap, page_to_phys(page)); 43 - if (flags) 44 - raw_spin_lock_irqsave(&patch_lock, *flags); 45 - else 46 - __acquire(&patch_lock); 47 48 return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); 49 } ··· 49 { 50 clear_fixmap(fixmap); 51 52 - if (flags) 53 - raw_spin_unlock_irqrestore(&patch_lock, *flags); 54 - else 55 - __release(&patch_lock); 56 } 57 58 void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) ··· 61 int mapped; 62 63 /* Make sure we don't have any aliases in cache */ 64 - flush_kernel_vmap_range(addr, len); 65 - flush_icache_range(start, end); 66 67 p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); 68 ··· 76 * We're crossing a page boundary, so 77 * need to remap 78 */ 79 - flush_kernel_vmap_range((void *)fixmap, 80 - (p-fixmap) * sizeof(*p)); 81 if (mapped) 82 patch_unmap(FIX_TEXT_POKE0, &flags); 83 p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, ··· 87 } 88 } 89 90 - flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); 91 if (mapped) 92 patch_unmap(FIX_TEXT_POKE0, &flags); 93 - flush_icache_range(start, end); 94 } 95 96 void __kprobes __patch_text(void *addr, u32 insn)
··· 40 41 *need_unmap = 1; 42 set_fixmap(fixmap, page_to_phys(page)); 43 + raw_spin_lock_irqsave(&patch_lock, *flags); 44 45 return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); 46 } ··· 52 { 53 clear_fixmap(fixmap); 54 55 + raw_spin_unlock_irqrestore(&patch_lock, *flags); 56 } 57 58 void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) ··· 67 int mapped; 68 69 /* Make sure we don't have any aliases in cache */ 70 + flush_kernel_dcache_range_asm(start, end); 71 + flush_kernel_icache_range_asm(start, end); 72 + flush_tlb_kernel_range(start, end); 73 74 p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); 75 ··· 81 * We're crossing a page boundary, so 82 * need to remap 83 */ 84 + flush_kernel_dcache_range_asm((unsigned long)fixmap, 85 + (unsigned long)p); 86 + flush_tlb_kernel_range((unsigned long)fixmap, 87 + (unsigned long)p); 88 if (mapped) 89 patch_unmap(FIX_TEXT_POKE0, &flags); 90 p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, ··· 90 } 91 } 92 93 + flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p); 94 + flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p); 95 if (mapped) 96 patch_unmap(FIX_TEXT_POKE0, &flags); 97 } 98 99 void __kprobes __patch_text(void *addr, u32 insn)
+24 -3
arch/parisc/kernel/process.c
··· 38 #include <linux/rcupdate.h> 39 #include <linux/random.h> 40 #include <linux/nmi.h> 41 42 #include <asm/io.h> 43 #include <asm/asm-offsets.h> ··· 47 #include <asm/pdc_chassis.h> 48 #include <asm/unwind.h> 49 #include <asm/sections.h> 50 51 #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) 52 #define CMD_RESET 5 /* reset any module */ ··· 160 int running_on_qemu __ro_after_init; 161 EXPORT_SYMBOL(running_on_qemu); 162 163 - void __cpuidle arch_cpu_idle_dead(void) 164 { 165 - /* nop on real hardware, qemu will offline CPU. */ 166 - asm volatile("or %%r31,%%r31,%%r31\n":::); 167 } 168 169 void __cpuidle arch_cpu_idle(void)
··· 38 #include <linux/rcupdate.h> 39 #include <linux/random.h> 40 #include <linux/nmi.h> 41 + #include <linux/sched/hotplug.h> 42 43 #include <asm/io.h> 44 #include <asm/asm-offsets.h> ··· 46 #include <asm/pdc_chassis.h> 47 #include <asm/unwind.h> 48 #include <asm/sections.h> 49 + #include <asm/cacheflush.h> 50 51 #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) 52 #define CMD_RESET 5 /* reset any module */ ··· 158 int running_on_qemu __ro_after_init; 159 EXPORT_SYMBOL(running_on_qemu); 160 161 + /* 162 + * Called from the idle thread for the CPU which has been shutdown. 163 + */ 164 + void arch_cpu_idle_dead(void) 165 { 166 + #ifdef CONFIG_HOTPLUG_CPU 167 + idle_task_exit(); 168 + 169 + local_irq_disable(); 170 + 171 + /* Tell __cpu_die() that this CPU is now safe to dispose of. */ 172 + (void)cpu_report_death(); 173 + 174 + /* Ensure that the cache lines are written out. */ 175 + flush_cache_all_local(); 176 + flush_tlb_all_local(NULL); 177 + 178 + /* Let PDC firmware put CPU into firmware idle loop. */ 179 + __pdc_cpu_rendezvous(); 180 + 181 + pr_warn("PDC does not provide rendezvous function.\n"); 182 + #endif 183 + while (1); 184 } 185 186 void __cpuidle arch_cpu_idle(void)
+4 -2
arch/parisc/kernel/processor.c
··· 19 #include <linux/random.h> 20 #include <linux/slab.h> 21 #include <linux/cpu.h> 22 #include <asm/param.h> 23 #include <asm/cache.h> 24 #include <asm/hardware.h> /* for register_parisc_driver() stuff */ ··· 318 * 319 * o Enable CPU profiling hooks. 320 */ 321 - int __init init_per_cpu(int cpunum) 322 { 323 int ret; 324 struct pdc_coproc_cfg coproc_cfg; ··· 391 boot_cpu_data.cpu_hz / 1000000, 392 boot_cpu_data.cpu_hz % 1000000 ); 393 394 - #ifdef CONFIG_PARISC_CPU_TOPOLOGY 395 seq_printf(m, "physical id\t: %d\n", 396 topology_physical_package_id(cpu)); 397 seq_printf(m, "siblings\t: %d\n", ··· 461 */ 462 void __init processor_init(void) 463 { 464 register_parisc_driver(&cpu_driver); 465 }
··· 19 #include <linux/random.h> 20 #include <linux/slab.h> 21 #include <linux/cpu.h> 22 + #include <asm/topology.h> 23 #include <asm/param.h> 24 #include <asm/cache.h> 25 #include <asm/hardware.h> /* for register_parisc_driver() stuff */ ··· 317 * 318 * o Enable CPU profiling hooks. 319 */ 320 + int init_per_cpu(int cpunum) 321 { 322 int ret; 323 struct pdc_coproc_cfg coproc_cfg; ··· 390 boot_cpu_data.cpu_hz / 1000000, 391 boot_cpu_data.cpu_hz % 1000000 ); 392 393 + #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY 394 seq_printf(m, "physical id\t: %d\n", 395 topology_physical_package_id(cpu)); 396 seq_printf(m, "siblings\t: %d\n", ··· 460 */ 461 void __init processor_init(void) 462 { 463 + reset_cpu_topology(); 464 register_parisc_driver(&cpu_driver); 465 }
+93 -15
arch/parisc/kernel/smp.c
··· 30 #include <linux/ftrace.h> 31 #include <linux/cpu.h> 32 #include <linux/kgdb.h> 33 34 #include <linux/atomic.h> 35 #include <asm/current.h> ··· 60 61 /* track which CPU is booting */ 62 static volatile int cpu_now_booting; 63 - 64 - static int parisc_max_cpus = 1; 65 66 static DEFINE_PER_CPU(spinlock_t, ipi_lock); 67 ··· 268 /* 269 * Called by secondaries to update state and initialize CPU registers. 270 */ 271 - static void __init 272 smp_cpu_init(int cpunum) 273 { 274 extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ ··· 308 * Slaves start using C here. Indirectly called from smp_slave_stext. 309 * Do what start_kernel() and main() do for boot strap processor (aka monarch) 310 */ 311 - void __init smp_callin(unsigned long pdce_proc) 312 { 313 int slave_id = cpu_now_booting; 314 ··· 333 /* 334 * Bring one cpu online. 335 */ 336 - int smp_boot_one_cpu(int cpuid, struct task_struct *idle) 337 { 338 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); 339 long timeout; 340 341 /* Let _start know what logical CPU we're booting 342 ** (offset into init_tasks[],cpu_data[]) ··· 390 if(cpu_online(cpuid)) { 391 /* Which implies Slave has started up */ 392 cpu_now_booting = 0; 393 - smp_init_current_idle_task = NULL; 394 goto alive ; 395 } 396 udelay(100); ··· 430 spin_lock_init(&per_cpu(ipi_lock, cpu)); 431 432 init_cpu_present(cpumask_of(0)); 433 - 434 - parisc_max_cpus = max_cpus; 435 - if (!max_cpus) 436 - printk(KERN_INFO "SMP mode deactivated.\n"); 437 } 438 439 440 - void smp_cpus_done(unsigned int cpu_max) 441 { 442 - return; 443 } 444 445 446 int __cpu_up(unsigned int cpu, struct task_struct *tidle) 447 { 448 - if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle)) 449 - return -ENOSYS; 450 451 - return cpu_online(cpu) ? 0 : -ENOSYS; 452 } 453 454 #ifdef CONFIG_PROC_FS
··· 30 #include <linux/ftrace.h> 31 #include <linux/cpu.h> 32 #include <linux/kgdb.h> 33 + #include <linux/sched/hotplug.h> 34 35 #include <linux/atomic.h> 36 #include <asm/current.h> ··· 59 60 /* track which CPU is booting */ 61 static volatile int cpu_now_booting; 62 63 static DEFINE_PER_CPU(spinlock_t, ipi_lock); 64 ··· 269 /* 270 * Called by secondaries to update state and initialize CPU registers. 271 */ 272 + static void 273 smp_cpu_init(int cpunum) 274 { 275 extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ ··· 309 * Slaves start using C here. Indirectly called from smp_slave_stext. 310 * Do what start_kernel() and main() do for boot strap processor (aka monarch) 311 */ 312 + void smp_callin(unsigned long pdce_proc) 313 { 314 int slave_id = cpu_now_booting; 315 ··· 334 /* 335 * Bring one cpu online. 336 */ 337 + static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) 338 { 339 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); 340 long timeout; 341 + 342 + #ifdef CONFIG_HOTPLUG_CPU 343 + int i; 344 + 345 + /* reset irq statistics for this CPU */ 346 + memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t)); 347 + for (i = 0; i < NR_IRQS; i++) { 348 + struct irq_desc *desc = irq_to_desc(i); 349 + 350 + if (desc && desc->kstat_irqs) 351 + *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0; 352 + } 353 + #endif 354 + 355 + /* wait until last booting CPU has started. */ 356 + while (cpu_now_booting) 357 + ; 358 359 /* Let _start know what logical CPU we're booting 360 ** (offset into init_tasks[],cpu_data[]) ··· 374 if(cpu_online(cpuid)) { 375 /* Which implies Slave has started up */ 376 cpu_now_booting = 0; 377 goto alive ; 378 } 379 udelay(100); ··· 415 spin_lock_init(&per_cpu(ipi_lock, cpu)); 416 417 init_cpu_present(cpumask_of(0)); 418 } 419 420 421 + void __init smp_cpus_done(unsigned int cpu_max) 422 { 423 } 424 425 426 int __cpu_up(unsigned int cpu, struct task_struct *tidle) 427 { 428 + if (cpu_online(cpu)) 429 + return 0; 430 431 + if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle)) 432 + return -EIO; 433 + 434 + return cpu_online(cpu) ? 0 : -EIO; 435 + } 436 + 437 + /* 438 + * __cpu_disable runs on the processor to be shutdown. 439 + */ 440 + int __cpu_disable(void) 441 + { 442 + #ifdef CONFIG_HOTPLUG_CPU 443 + unsigned int cpu = smp_processor_id(); 444 + 445 + remove_cpu_topology(cpu); 446 + 447 + /* 448 + * Take this CPU offline. Once we clear this, we can't return, 449 + * and we must not schedule until we're ready to give up the cpu. 450 + */ 451 + set_cpu_online(cpu, false); 452 + 453 + /* Find a new timesync master */ 454 + if (cpu == time_keeper_id) { 455 + time_keeper_id = cpumask_first(cpu_online_mask); 456 + pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id); 457 + } 458 + 459 + disable_percpu_irq(IPI_IRQ); 460 + 461 + irq_migrate_all_off_this_cpu(); 462 + 463 + flush_cache_all_local(); 464 + flush_tlb_all_local(NULL); 465 + 466 + /* disable all irqs, including timer irq */ 467 + local_irq_disable(); 468 + 469 + /* wait for next timer irq ... */ 470 + mdelay(1000/HZ+100); 471 + 472 + /* ... and then clear all pending external irqs */ 473 + set_eiem(0); 474 + mtctl(~0UL, CR_EIRR); 475 + mfctl(CR_EIRR); 476 + mtctl(0, CR_EIRR); 477 + #endif 478 + return 0; 479 + } 480 + 481 + /* 482 + * called on the thread which is asking for a CPU to be shutdown - 483 + * waits until shutdown has completed, or it is timed out. 
484 + */ 485 + void __cpu_die(unsigned int cpu) 486 + { 487 + pdc_cpu_rendezvous_lock(); 488 + 489 + if (!cpu_wait_death(cpu, 5)) { 490 + pr_crit("CPU%u: cpu didn't die\n", cpu); 491 + return; 492 + } 493 + pr_info("CPU%u: is shutting down\n", cpu); 494 + 495 + /* set task's state to interruptible sleep */ 496 + set_current_state(TASK_INTERRUPTIBLE); 497 + schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ); 498 + 499 + pdc_cpu_rendezvous_unlock(); 500 } 501 502 #ifdef CONFIG_PROC_FS
+4 -2
arch/parisc/kernel/time.c
··· 40 41 #include <linux/timex.h> 42 43 static unsigned long clocktick __ro_after_init; /* timer cycles per tick */ 44 45 /* ··· 86 cpuinfo->it_value = next_tick; 87 88 /* Go do system house keeping. */ 89 - if (cpu != 0) 90 ticks_elapsed = 0; 91 legacy_timer_tick(ticks_elapsed); 92 ··· 152 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 153 }; 154 155 - void __init start_cpu_itimer(void) 156 { 157 unsigned int cpu = smp_processor_id(); 158 unsigned long next_tick = mfctl(16) + clocktick;
··· 40 41 #include <linux/timex.h> 42 43 + int time_keeper_id __read_mostly; /* CPU used for timekeeping. */ 44 + 45 static unsigned long clocktick __ro_after_init; /* timer cycles per tick */ 46 47 /* ··· 84 cpuinfo->it_value = next_tick; 85 86 /* Go do system house keeping. */ 87 + if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id)) 88 ticks_elapsed = 0; 89 legacy_timer_tick(ticks_elapsed); 90 ··· 150 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 151 }; 152 153 + void start_cpu_itimer(void) 154 { 155 unsigned int cpu = smp_processor_id(); 156 unsigned long next_tick = mfctl(16) + clocktick;
+18 -59
arch/parisc/kernel/topology.c
··· 13 #include <linux/percpu.h> 14 #include <linux/sched.h> 15 #include <linux/sched/topology.h> 16 17 #include <asm/topology.h> 18 19 - /* 20 - * cpu topology table 21 - */ 22 - struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly; 23 - EXPORT_SYMBOL_GPL(cpu_topology); 24 25 - const struct cpumask *cpu_coregroup_mask(int cpu) 26 - { 27 - return &cpu_topology[cpu].core_sibling; 28 - } 29 - 30 - static void update_siblings_masks(unsigned int cpuid) 31 - { 32 - struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; 33 - int cpu; 34 - 35 - /* update core and thread sibling masks */ 36 - for_each_possible_cpu(cpu) { 37 - cpu_topo = &cpu_topology[cpu]; 38 - 39 - if (cpuid_topo->socket_id != cpu_topo->socket_id) 40 - continue; 41 - 42 - cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); 43 - if (cpu != cpuid) 44 - cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); 45 - 46 - if (cpuid_topo->core_id != cpu_topo->core_id) 47 - continue; 48 - 49 - cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); 50 - if (cpu != cpuid) 51 - cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); 52 - } 53 - smp_wmb(); 54 - } 55 - 56 - static int dualcores_found __initdata; 57 58 /* 59 * store_cpu_topology is called at boot when only one cpu is running 60 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, 61 * which prevents simultaneous write access to cpu_topology array 62 */ 63 - void __init store_cpu_topology(unsigned int cpuid) 64 { 65 - struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid]; 66 struct cpuinfo_parisc *p; 67 int max_socket = -1; 68 unsigned long cpu; ··· 37 /* If the cpu topology has been already set, just return */ 38 if (cpuid_topo->core_id != -1) 39 return; 40 41 /* create cpu topology mapping */ 42 cpuid_topo->thread_id = -1; ··· 59 cpuid_topo->core_id = cpu_topology[cpu].core_id; 60 if (p->cpu_loc) { 61 cpuid_topo->core_id++; 62 - cpuid_topo->socket_id = cpu_topology[cpu].socket_id; 63 dualcores_found = 1; 64 continue; 65 } 66 } 67 68 - if (cpuid_topo->socket_id == -1) 69 - max_socket = max(max_socket, cpu_topology[cpu].socket_id); 70 } 71 72 - if (cpuid_topo->socket_id == -1) 73 - cpuid_topo->socket_id = max_socket + 1; 74 75 update_siblings_masks(cpuid); 76 77 pr_info("CPU%u: cpu core %d of socket %d\n", 78 cpuid, 79 cpu_topology[cpuid].core_id, 80 - cpu_topology[cpuid].socket_id); 81 } 82 83 static struct sched_domain_topology_level parisc_mc_topology[] = { ··· 95 */ 96 void __init init_cpu_topology(void) 97 { 98 - unsigned int cpu; 99 - 100 - /* init core mask and capacity */ 101 - for_each_possible_cpu(cpu) { 102 - struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]); 103 - 104 - cpu_topo->thread_id = -1; 105 - cpu_topo->core_id = -1; 106 - cpu_topo->socket_id = -1; 107 - cpumask_clear(&cpu_topo->core_sibling); 108 - cpumask_clear(&cpu_topo->thread_sibling); 109 - } 110 - smp_wmb(); 111 - 112 /* Set scheduler topology descriptor */ 113 if (dualcores_found) 114 set_sched_topology(parisc_mc_topology);
··· 13 #include <linux/percpu.h> 14 #include <linux/sched.h> 15 #include <linux/sched/topology.h> 16 + #include <linux/cpu.h> 17 18 #include <asm/topology.h> 19 + #include <asm/sections.h> 20 21 + static DEFINE_PER_CPU(struct cpu, cpu_devices); 22 23 + static int dualcores_found; 24 25 /* 26 * store_cpu_topology is called at boot when only one cpu is running 27 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, 28 * which prevents simultaneous write access to cpu_topology array 29 */ 30 + void store_cpu_topology(unsigned int cpuid) 31 { 32 + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; 33 struct cpuinfo_parisc *p; 34 int max_socket = -1; 35 unsigned long cpu; ··· 70 /* If the cpu topology has been already set, just return */ 71 if (cpuid_topo->core_id != -1) 72 return; 73 + 74 + #ifdef CONFIG_HOTPLUG_CPU 75 + per_cpu(cpu_devices, cpuid).hotpluggable = 1; 76 + #endif 77 + if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid)) 78 + pr_warn("Failed to register CPU%d device", cpuid); 79 80 /* create cpu topology mapping */ 81 cpuid_topo->thread_id = -1; ··· 86 cpuid_topo->core_id = cpu_topology[cpu].core_id; 87 if (p->cpu_loc) { 88 cpuid_topo->core_id++; 89 + cpuid_topo->package_id = cpu_topology[cpu].package_id; 90 dualcores_found = 1; 91 continue; 92 } 93 } 94 95 + if (cpuid_topo->package_id == -1) 96 + max_socket = max(max_socket, cpu_topology[cpu].package_id); 97 } 98 99 + if (cpuid_topo->package_id == -1) 100 + cpuid_topo->package_id = max_socket + 1; 101 102 update_siblings_masks(cpuid); 103 104 pr_info("CPU%u: cpu core %d of socket %d\n", 105 cpuid, 106 cpu_topology[cpuid].core_id, 107 + cpu_topology[cpuid].package_id); 108 } 109 110 static struct sched_domain_topology_level parisc_mc_topology[] = { ··· 122 */ 123 void __init init_cpu_topology(void) 124 { 125 /* Set scheduler topology descriptor */ 126 if (dualcores_found) 127 set_sched_topology(parisc_mc_topology);
+33 -8
drivers/parisc/dino.c
··· 142 { 143 struct pci_hba_data hba; /* 'C' inheritance - must be first */ 144 spinlock_t dinosaur_pen; 145 - unsigned long txn_addr; /* EIR addr to generate interrupt */ 146 - u32 txn_data; /* EIR data assign to each dino */ 147 u32 imr; /* IRQ's which are enabled */ 148 int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ 149 #ifdef DINO_DEBUG 150 unsigned int dino_irr0; /* save most recent IRQ line stat */ ··· 338 if (tmp & DINO_MASK_IRQ(local_irq)) { 339 DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n", 340 __func__, tmp); 341 - gsc_writel(dino_dev->txn_data, dino_dev->txn_addr); 342 } 343 } 344 345 static struct irq_chip dino_interrupt_type = { 346 .name = "GSC-PCI", 347 .irq_unmask = dino_unmask_irq, 348 .irq_mask = dino_mask_irq, 349 }; 350 351 ··· 834 { 835 int status; 836 u32 eim; 837 - struct gsc_irq gsc_irq; 838 struct resource *res; 839 840 pcibios_register_hba(&dino_dev->hba); ··· 848 ** still only has 11 IRQ input lines - just map some of them 849 ** to a different processor. 850 */ 851 - dev->irq = gsc_alloc_irq(&gsc_irq); 852 - dino_dev->txn_addr = gsc_irq.txn_addr; 853 - dino_dev->txn_data = gsc_irq.txn_data; 854 - eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; 855 856 /* 857 ** Dino needs a PA "IRQ" to get a processor's attention.
··· 142 { 143 struct pci_hba_data hba; /* 'C' inheritance - must be first */ 144 spinlock_t dinosaur_pen; 145 u32 imr; /* IRQ's which are enabled */ 146 + struct gsc_irq gsc_irq; 147 int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ 148 #ifdef DINO_DEBUG 149 unsigned int dino_irr0; /* save most recent IRQ line stat */ ··· 339 if (tmp & DINO_MASK_IRQ(local_irq)) { 340 DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n", 341 __func__, tmp); 342 + gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr); 343 } 344 } 345 + 346 + #ifdef CONFIG_SMP 347 + static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, 348 + bool force) 349 + { 350 + struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); 351 + struct cpumask tmask; 352 + int cpu_irq; 353 + u32 eim; 354 + 355 + if (!cpumask_and(&tmask, dest, cpu_online_mask)) 356 + return -EINVAL; 357 + 358 + cpu_irq = cpu_check_affinity(d, &tmask); 359 + if (cpu_irq < 0) 360 + return cpu_irq; 361 + 362 + dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); 363 + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; 364 + __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0); 365 + 366 + irq_data_update_effective_affinity(d, &tmask); 367 + 368 + return IRQ_SET_MASK_OK; 369 + } 370 + #endif 371 372 static struct irq_chip dino_interrupt_type = { 373 .name = "GSC-PCI", 374 .irq_unmask = dino_unmask_irq, 375 .irq_mask = dino_mask_irq, 376 + #ifdef CONFIG_SMP 377 + .irq_set_affinity = dino_set_affinity_irq, 378 + #endif 379 }; 380 381 ··· 806 { 807 int status; 808 u32 eim; 809 struct resource *res; 810 811 pcibios_register_hba(&dino_dev->hba); ··· 821 ** still only has 11 IRQ input lines - just map some of them 822 ** to a different processor. 823 */ 824 + dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq); 825 + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; 826 827 /* 828 ** Dino needs a PA "IRQ" to get a processor's attention.
+31
drivers/parisc/gsc.c
··· 135 */ 136 } 137 138 static struct irq_chip gsc_asic_interrupt_type = { 139 .name = "GSC-ASIC", 140 .irq_unmask = gsc_asic_unmask_irq, 141 .irq_mask = gsc_asic_mask_irq, 142 }; 143 144 int gsc_assign_irq(struct irq_chip *type, void *data)
··· 135 */ 136 } 137 138 + #ifdef CONFIG_SMP 139 + static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, 140 + bool force) 141 + { 142 + struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d); 143 + struct cpumask tmask; 144 + int cpu_irq; 145 + 146 + if (!cpumask_and(&tmask, dest, cpu_online_mask)) 147 + return -EINVAL; 148 + 149 + cpu_irq = cpu_check_affinity(d, &tmask); 150 + if (cpu_irq < 0) 151 + return cpu_irq; 152 + 153 + gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); 154 + gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data; 155 + 156 + /* switch IRQ's for devices below LASI/WAX to other CPU */ 157 + gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR); 158 + 159 + irq_data_update_effective_affinity(d, &tmask); 160 + 161 + return IRQ_SET_MASK_OK; 162 + } 163 + #endif 164 + 165 + 166 static struct irq_chip gsc_asic_interrupt_type = { 167 .name = "GSC-ASIC", 168 .irq_unmask = gsc_asic_unmask_irq, 169 .irq_mask = gsc_asic_mask_irq, 170 + #ifdef CONFIG_SMP 171 + .irq_set_affinity = gsc_set_affinity_irq, 172 + #endif 173 }; 174 175 int gsc_assign_irq(struct irq_chip *type, void *data)
+1
drivers/parisc/gsc.h
··· 31 int version; 32 int type; 33 int eim; 34 int global_irq[32]; 35 }; 36
··· 31 int version; 32 int type; 33 int eim; 34 + struct gsc_irq gsc_irq; 35 int global_irq[32]; 36 }; 37
+3 -4
drivers/parisc/lasi.c
··· 163 { 164 extern void (*chassis_power_off)(void); 165 struct gsc_asic *lasi; 166 - struct gsc_irq gsc_irq; 167 int ret; 168 169 lasi = kzalloc(sizeof(*lasi), GFP_KERNEL); ··· 184 lasi_init_irq(lasi); 185 186 /* the IRQ lasi should use */ 187 - dev->irq = gsc_alloc_irq(&gsc_irq); 188 if (dev->irq < 0) { 189 printk(KERN_ERR "%s(): cannot get GSC irq\n", 190 __func__); ··· 192 return -EBUSY; 193 } 194 195 - lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; 196 197 - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); 198 if (ret < 0) { 199 kfree(lasi); 200 return ret;
··· 163 { 164 extern void (*chassis_power_off)(void); 165 struct gsc_asic *lasi; 166 int ret; 167 168 lasi = kzalloc(sizeof(*lasi), GFP_KERNEL); ··· 185 lasi_init_irq(lasi); 186 187 /* the IRQ lasi should use */ 188 + dev->irq = gsc_alloc_irq(&lasi->gsc_irq); 189 if (dev->irq < 0) { 190 printk(KERN_ERR "%s(): cannot get GSC irq\n", 191 __func__); ··· 193 return -EBUSY; 194 } 195 196 + lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data; 197 198 + ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); 199 if (ret < 0) { 200 kfree(lasi); 201 return ret;
+3 -4
drivers/parisc/wax.c
··· 68 { 69 struct gsc_asic *wax; 70 struct parisc_device *parent; 71 - struct gsc_irq gsc_irq; 72 int ret; 73 74 wax = kzalloc(sizeof(*wax), GFP_KERNEL); ··· 84 wax_init_irq(wax); 85 86 /* the IRQ wax should use */ 87 - dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ); 88 if (dev->irq < 0) { 89 printk(KERN_ERR "%s(): cannot get GSC irq\n", 90 __func__); ··· 92 return -EBUSY; 93 } 94 95 - wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; 96 97 - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); 98 if (ret < 0) { 99 kfree(wax); 100 return ret;
··· 68 { 69 struct gsc_asic *wax; 70 struct parisc_device *parent; 71 int ret; 72 73 wax = kzalloc(sizeof(*wax), GFP_KERNEL); ··· 85 wax_init_irq(wax); 86 87 /* the IRQ wax should use */ 88 + dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ); 89 if (dev->irq < 0) { 90 printk(KERN_ERR "%s(): cannot get GSC irq\n", 91 __func__); ··· 93 return -EBUSY; 94 } 95 96 + wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data; 97 98 + ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); 99 if (ret < 0) { 100 kfree(wax); 101 return ret;