···140 .name = "IPI_call"141};14200000000000000000000000000000000000000000000000000000000000000000000000000143/*144 * Common setup before any secondaries are started145 * Make sure all CPU's are in a sensible state before we boot any of the146 * secondarys147 */148-void plat_smp_setup(void)149{150- unsigned long val;151- int i, num;152153#ifdef CONFIG_MIPS_MT_FPAFF154 /* If we have an FPU, enroll ourselves in the FPU-full mask */···240 /* Put MVPE's into 'configuration state' */241 set_c0_mvpcontrol(MVPCONTROL_VPC);242243- val = read_c0_mvpconf0();0244245 /* we'll always have more TC's than VPE's, so loop setting everything246 to a sensible state */247- for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {248- settc(i);249250- /* VPE's */251- if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {252-253- /* deactivate all but vpe0 */254- if (i != 0) {255- unsigned long tmp = read_vpe_c0_vpeconf0();256-257- tmp &= ~VPECONF0_VPA;258-259- /* master VPE */260- tmp |= VPECONF0_MVP;261- write_vpe_c0_vpeconf0(tmp);262-263- /* Record this as available CPU */264- cpu_set(i, phys_cpu_present_map);265- __cpu_number_map[i] = ++num;266- __cpu_logical_map[num] = i;267- }268-269- /* disable multi-threading with TC's */270- write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);271-272- if (i != 0) {273- write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);274-275- /* set config to be the same as vpe0, particularly kseg0 coherency alg */276- write_vpe_c0_config( read_c0_config());277-278- /* make sure there are no software interrupts pending */279- write_vpe_c0_cause(0);280-281- /* Propagate Config7 */282- write_vpe_c0_config7(read_c0_config7());283- }284-285- }286-287- /* TC's */288-289- if (i != 0) {290- unsigned long tmp;291-292- /* bind a TC to each VPE, May as well put all excess TC's293- on the last VPE */294- if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )295- write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );296- else {297- write_tc_c0_tcbind( read_tc_c0_tcbind() | i);298-299- /* and set XTC */300- write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));301- }302-303- tmp = read_tc_c0_tcstatus();304-305- /* mark not allocated and not dynamically allocatable */306- tmp &= ~(TCSTATUS_A | TCSTATUS_DA);307- tmp |= TCSTATUS_IXMT; /* interrupt exempt */308- write_tc_c0_tcstatus(tmp);309-310- write_tc_c0_tchalt(TCHALT_H);311- }312 }313314 /* Release config state */···257258 /* We'll wait until starting the secondaries before starting MVPE */259260- printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);261}262263void __init plat_prepare_cpus(unsigned int max_cpus)
···140 .name = "IPI_call"141};142143+static void __init smp_copy_vpe_config(void)144+{145+ write_vpe_c0_status(146+ (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);147+148+ /* set config to be the same as vpe0, particularly kseg0 coherency alg */149+ write_vpe_c0_config( read_c0_config());150+151+ /* make sure there are no software interrupts pending */152+ write_vpe_c0_cause(0);153+154+ /* Propagate Config7 */155+ write_vpe_c0_config7(read_c0_config7());156+}157+158+static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,159+ unsigned int ncpu)160+{161+ if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))162+ return ncpu;163+164+ /* Deactivate all but VPE 0 */165+ if (tc != 0) {166+ unsigned long tmp = read_vpe_c0_vpeconf0();167+168+ tmp &= ~VPECONF0_VPA;169+170+ /* master VPE */171+ tmp |= VPECONF0_MVP;172+ write_vpe_c0_vpeconf0(tmp);173+174+ /* Record this as available CPU */175+ cpu_set(tc, phys_cpu_present_map);176+ __cpu_number_map[tc] = ++ncpu;177+ __cpu_logical_map[ncpu] = tc;178+ }179+180+ /* Disable multi-threading with TC's */181+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);182+183+ if (tc != 0)184+ smp_copy_vpe_config();185+186+ return ncpu;187+}188+189+static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)190+{191+ unsigned long tmp;192+193+ if (!tc)194+ return;195+196+ /* bind a TC to each VPE, May as well put all excess TC's197+ on the last VPE */198+ if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))199+ write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));200+ else {201+ write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);202+203+ /* and set XTC */204+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));205+ }206+207+ tmp = read_tc_c0_tcstatus();208+209+ /* mark not allocated and not dynamically allocatable */210+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);211+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */212+ write_tc_c0_tcstatus(tmp);213+214+ write_tc_c0_tchalt(TCHALT_H);215+}216+217/*218 * Common setup before any secondaries are started219 * Make sure all CPU's are in a sensible state before we boot any of the220 * secondarys221 */222+void __init plat_smp_setup(void)223{224+ unsigned int mvpconf0, ntc, tc, ncpu = 0;0225226#ifdef CONFIG_MIPS_MT_FPAFF227 /* If we have an FPU, enroll ourselves in the FPU-full mask */···167 /* Put MVPE's into 'configuration state' */168 set_c0_mvpcontrol(MVPCONTROL_VPC);169170+ mvpconf0 = read_c0_mvpconf0();171+ ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;172173 /* we'll always have more TC's than VPE's, so loop setting everything174 to a sensible state */175+ for (tc = 0; tc <= ntc; tc++) {176+ settc(tc);177178+ smp_tc_init(tc, mvpconf0);179+ ncpu = smp_vpe_init(tc, mvpconf0, ncpu);000000000000000000000000000000000000000000000000000000000000180 }181182 /* Release config state */···243244 /* We'll wait until starting the secondaries before starting MVPE */245246+ printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);247}248249void __init plat_prepare_cpus(unsigned int max_cpus)