[IA64] SN: Add support for CPU disable

Add additional support for CPU disable on SN platforms.
Correctly set up the smp_affinity mask for I/O error IRQs.
Restrict the use of the feature to Altix 4000 and 450 systems
running with a CPU disable capable PROM, and do not allow disabling
of CPU 0.

Signed-off-by: John Keller <jpk@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

authored by John Keller and committed by Tony Luck 6e9de181 1aac0b57
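At a high level, the patch adds two SN-specific entry points and hooks them into existing paths. The sketch below condenses the hunks that follow into one place; it is a summary for orientation, not literal patch text.

/*
 * Condensed view of the new control flow (summary of the hunks below,
 * not literal patch text).
 */

/* arch/ia64/kernel/smpboot.c -- the generic CPU-disable path now asks SN: */
	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

/*
 * arch/ia64/sn/kernel/sn2/sn2_smp.c -- disabling is allowed only on SHub2
 * systems with a CPU-disable capable PROM, and never for the boot CPU:
 */
	allowed = is_shub2() &&
		  sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT) &&
		  cpu != 0;

/*
 * arch/ia64/sn/kernel/irq.c -- error IRQs are pinned to the boot CPU,
 * which can never be disabled, so their target always stays online:
 */
	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
		set_irq_affinity_info(irq, cpu_physical_id(0), 0);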

+64 -4
+6
arch/ia64/kernel/smpboot.c
···
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
+#include <asm/sn/arch.h>

#define SMP_DEBUG 0
···
	if (cpu == 0 && !bsp_remove_ok) {
		printk ("Your platform does not support removal of BSP\n");
		return (-EBUSY);
+	}
+
+	if (ia64_platform_is("sn2")) {
+		if (!sn_cpu_disable_allowed(cpu))
+			return -EBUSY;
	}

	cpu_clear(cpu, cpu_online_map);
+10 -4
arch/ia64/sn/kernel/huberror.c
···
 */
void hub_error_init(struct hubdev_info *hubdev_info)
{
+
	if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
-			"SN_hub_error", (void *)hubdev_info))
+			"SN_hub_error", (void *)hubdev_info)) {
		printk("hub_error_init: Failed to request_irq for 0x%p\n",
			hubdev_info);
-	return;
+		return;
+	}
+	sn_set_err_irq_affinity(SGI_II_ERROR);
}

···
 */
void ice_error_init(struct hubdev_info *hubdev_info)
{
+
	if (request_irq
	    (SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error",
-	     (void *)hubdev_info))
+	     (void *)hubdev_info)) {
		printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
		       hubdev_info);
-	return;
+		return;
+	}
+	sn_set_err_irq_affinity(SGI_TIO_ERROR);
}
+15
arch/ia64/sn/kernel/irq.c
···
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_feature_sets.h>

static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
···
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
+
+#ifdef CONFIG_SMP
+void sn_set_err_irq_affinity(unsigned int irq)
+{
+	/*
+	 * On systems which support CPU disabling (SHub2), all error interrupts
+	 * are targetted at the boot CPU.
+	 */
+	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
+		set_irq_affinity_info(irq, cpu_physical_id(0), 0);
+}
+#else
+void sn_set_err_irq_affinity(unsigned int irq) { }
+#endif

static void
sn_mask_irq(unsigned int irq)
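The driver-side hunks in this patch (huberror.c, pcibr, TIOCA, TIOCE) all follow the same pattern: request the error interrupt, then call the new helper to pin it to the boot CPU. A minimal sketch of that pattern is below; the handler name and cookie are placeholders, not identifiers from the patch.

	/*
	 * Typical caller pattern for sn_set_err_irq_affinity(): request the
	 * error IRQ, then retarget it at the boot CPU.  my_err_handler and
	 * my_cookie are placeholders for whatever the driver already uses.
	 */
	if (request_irq(SGI_II_ERROR, my_err_handler, IRQF_SHARED,
			"my_err", (void *)my_cookie)) {
		printk("failed to request error IRQ\n");
		return;
	}
	sn_set_err_irq_affinity(SGI_II_ERROR);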
+26
arch/ia64/sn/kernel/sn2/sn2_smp.c
···
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
+#include <asm/sn/sn_feature_sets.h>

DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DECLARE_PER_CPU(struct ptc_stats, ptcstats);
···

	sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/**
+ * sn_cpu_disable_allowed - Determine if a CPU can be disabled.
+ * @cpu - CPU that is requested to be disabled.
+ *
+ * CPU disable is only allowed on SHub2 systems running with a PROM
+ * that supports CPU disable.  It is not permitted to disable the boot processor.
+ */
+bool sn_cpu_disable_allowed(int cpu)
+{
+	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) {
+		if (cpu != 0)
+			return true;
+		else
+			printk(KERN_WARNING
+			       "Disabling the boot processor is not allowed.\n");
+
+	} else
+		printk(KERN_WARNING
+		       "CPU disable is not supported on this system.\n");
+
+	return false;
+}
+#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PROC_FS
+1
arch/ia64/sn/pci/pcibr/pcibr_provider.c
···
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}
+	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);

	/*
	 * Update the Bridge with the "kernel" pagesize
+2
arch/ia64/sn/pci/tioca_provider.c
···
		       __FUNCTION__, SGI_TIOCA_ERROR,
		       (int)tioca_common->ca_common.bs_persist_busnum);

+	sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
+
	/* Setup locality information */
	controller->node = tioca_kern->ca_closest_node;
	return tioca_common;
+1
arch/ia64/sn/pci/tioce_provider.c
···
		tioce_common->ce_pcibus.bs_persist_segment,
		tioce_common->ce_pcibus.bs_persist_busnum);

+	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
	return tioce_common;
}
+1
include/asm-ia64/sn/arch.h
···
extern u8 sn_region_size;

extern void sn_flush_all_caches(long addr, long bytes);
+extern bool sn_cpu_disable_allowed(int cpu);

#endif /* _ASM_IA64_SN_ARCH_H */
+1
include/asm-ia64/sn/intr.h
···
			  int, nasid_t, int);
extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern void sn_set_err_irq_affinity(unsigned int);
extern struct list_head **sn_irq_lh;

#define CPU_VECTOR_TO_IRQ(cpuid,vector)	(vector)
+1
include/asm-ia64/sn/sn_feature_sets.h
···
#define  PRF_PAL_CACHE_FLUSH_SAFE	0
#define  PRF_DEVICE_FLUSH_LIST		1
#define  PRF_HOTPLUG_SUPPORT		2
+#define  PRF_CPU_DISABLE_SUPPORT	3

/* --------------------- OS Features -------------------------------*/