Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] Get rid of additional_cpus kernel parameter.

It only caused a lot of confusion. From now on, cpu hotplug of up to
NR_CPUS will work by default. If somebody wants to limit that then
the possible_cpus parameter can be used.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens and committed by Martin Schwidefsky.
48483b32 519580fc

+79 -155
+1 -1
Documentation/cpu-hotplug.txt
··· 50 50 cpu_possible_map = cpu_present_map + additional_cpus 51 51 52 52 (*) Option valid only for following architectures 53 - - x86_64, ia64, s390 53 + - x86_64, ia64 54 54 55 55 ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT 56 56 to determine the number of potentially hot-pluggable cpus. The implementation
-1
arch/s390/kernel/early.c
··· 278 278 setup_lowcore_early(); 279 279 sclp_read_info_early(); 280 280 sclp_facilities_detect(); 281 - sclp_read_cpu_info_early(); 282 281 memsize = sclp_memory_detect(); 283 282 #ifndef CONFIG_64BIT 284 283 /*
-1
arch/s390/kernel/setup.c
··· 922 922 923 923 cpu_init(); 924 924 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; 925 - smp_setup_cpu_possible_map(); 926 925 927 926 /* 928 927 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
+76 -104
arch/s390/kernel/smp.c
··· 54 54 cpumask_t cpu_online_map = CPU_MASK_NONE; 55 55 EXPORT_SYMBOL(cpu_online_map); 56 56 57 - cpumask_t cpu_possible_map = CPU_MASK_NONE; 57 + cpumask_t cpu_possible_map = CPU_MASK_ALL; 58 58 EXPORT_SYMBOL(cpu_possible_map); 59 59 60 60 static struct task_struct *current_set[NR_CPUS]; ··· 399 399 "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); 400 400 return; 401 401 } 402 - zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area)); 402 + zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); 403 403 __cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu; 404 404 while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) == 405 405 sigp_busy) ··· 435 435 return 0; 436 436 } 437 437 438 - /* 439 - * Lets check how many CPUs we have. 440 - */ 441 - static void __init smp_count_cpus(unsigned int *configured_cpus, 442 - unsigned int *standby_cpus) 443 - { 444 - unsigned int cpu; 445 - struct sclp_cpu_info *info; 446 - u16 boot_cpu_addr, cpu_addr; 447 - 448 - boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; 449 - current_thread_info()->cpu = 0; 450 - *configured_cpus = 1; 451 - *standby_cpus = 0; 452 - 453 - info = alloc_bootmem_pages(sizeof(*info)); 454 - if (!info) 455 - disabled_wait((unsigned long) __builtin_return_address(0)); 456 - 457 - /* Use sigp detection algorithm if sclp doesn't work. */ 458 - if (sclp_get_cpu_info(info)) { 459 - smp_use_sigp_detection = 1; 460 - for (cpu = 0; cpu <= 65535; cpu++) { 461 - if (cpu == boot_cpu_addr) 462 - continue; 463 - __cpu_logical_map[CPU_INIT_NO] = cpu; 464 - if (cpu_stopped(CPU_INIT_NO)) 465 - (*configured_cpus)++; 466 - } 467 - goto out; 468 - } 469 - 470 - if (info->has_cpu_type) { 471 - for (cpu = 0; cpu < info->combined; cpu++) { 472 - if (info->cpu[cpu].address == boot_cpu_addr) { 473 - smp_cpu_type = info->cpu[cpu].type; 474 - break; 475 - } 476 - } 477 - } 478 - /* Count cpus. 
*/ 479 - for (cpu = 0; cpu < info->combined; cpu++) { 480 - if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 481 - continue; 482 - cpu_addr = info->cpu[cpu].address; 483 - if (cpu_addr == boot_cpu_addr) 484 - continue; 485 - __cpu_logical_map[CPU_INIT_NO] = cpu_addr; 486 - if (!cpu_stopped(CPU_INIT_NO)) { 487 - (*standby_cpus)++; 488 - continue; 489 - } 490 - smp_get_save_area(*configured_cpus, cpu_addr); 491 - (*configured_cpus)++; 492 - } 493 - out: 494 - printk(KERN_INFO "CPUs: %d configured, %d standby\n", 495 - *configured_cpus, *standby_cpus); 496 - free_bootmem((unsigned long) info, sizeof(*info)); 497 - } 498 - 499 438 static int cpu_known(int cpu_id) 500 439 { 501 440 int cpu; ··· 468 529 return 0; 469 530 } 470 531 471 - static int __init_refok smp_rescan_cpus_sclp(cpumask_t avail) 532 + static int smp_rescan_cpus_sclp(cpumask_t avail) 472 533 { 473 534 struct sclp_cpu_info *info; 474 535 int cpu_id, logical_cpu, cpu; ··· 477 538 logical_cpu = first_cpu(avail); 478 539 if (logical_cpu == NR_CPUS) 479 540 return 0; 480 - if (slab_is_available()) 481 - info = kmalloc(sizeof(*info), GFP_KERNEL); 482 - else 483 - info = alloc_bootmem(sizeof(*info)); 541 + info = kmalloc(sizeof(*info), GFP_KERNEL); 484 542 if (!info) 485 543 return -ENOMEM; 486 544 rc = sclp_get_cpu_info(info); ··· 500 564 break; 501 565 } 502 566 out: 503 - if (slab_is_available()) 504 - kfree(info); 505 - else 506 - free_bootmem((unsigned long) info, sizeof(*info)); 567 + kfree(info); 507 568 return rc; 508 569 } 509 570 ··· 508 575 { 509 576 cpumask_t avail; 510 577 511 - cpus_setall(avail); 512 - cpus_and(avail, avail, cpu_possible_map); 513 - cpus_andnot(avail, avail, cpu_present_map); 578 + cpus_xor(avail, cpu_possible_map, cpu_present_map); 514 579 if (smp_use_sigp_detection) 515 580 return smp_rescan_cpus_sigp(avail); 516 581 else 517 582 return smp_rescan_cpus_sclp(avail); 583 + } 584 + 585 + static void __init smp_detect_cpus(void) 586 + { 587 + unsigned int cpu, 
c_cpus, s_cpus; 588 + struct sclp_cpu_info *info; 589 + u16 boot_cpu_addr, cpu_addr; 590 + 591 + c_cpus = 1; 592 + s_cpus = 0; 593 + boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; 594 + info = kmalloc(sizeof(*info), GFP_KERNEL); 595 + if (!info) 596 + panic("smp_detect_cpus failed to allocate memory\n"); 597 + /* Use sigp detection algorithm if sclp doesn't work. */ 598 + if (sclp_get_cpu_info(info)) { 599 + smp_use_sigp_detection = 1; 600 + for (cpu = 0; cpu <= 65535; cpu++) { 601 + if (cpu == boot_cpu_addr) 602 + continue; 603 + __cpu_logical_map[CPU_INIT_NO] = cpu; 604 + if (!cpu_stopped(CPU_INIT_NO)) 605 + continue; 606 + smp_get_save_area(c_cpus, cpu); 607 + c_cpus++; 608 + } 609 + goto out; 610 + } 611 + 612 + if (info->has_cpu_type) { 613 + for (cpu = 0; cpu < info->combined; cpu++) { 614 + if (info->cpu[cpu].address == boot_cpu_addr) { 615 + smp_cpu_type = info->cpu[cpu].type; 616 + break; 617 + } 618 + } 619 + } 620 + 621 + for (cpu = 0; cpu < info->combined; cpu++) { 622 + if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 623 + continue; 624 + cpu_addr = info->cpu[cpu].address; 625 + if (cpu_addr == boot_cpu_addr) 626 + continue; 627 + __cpu_logical_map[CPU_INIT_NO] = cpu_addr; 628 + if (!cpu_stopped(CPU_INIT_NO)) { 629 + s_cpus++; 630 + continue; 631 + } 632 + smp_get_save_area(c_cpus, cpu_addr); 633 + c_cpus++; 634 + } 635 + out: 636 + kfree(info); 637 + printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus); 638 + lock_cpu_hotplug(); 639 + smp_rescan_cpus(); 640 + unlock_cpu_hotplug(); 518 641 } 519 642 520 643 /* ··· 663 674 return 0; 664 675 } 665 676 666 - static unsigned int __initdata additional_cpus; 667 - static unsigned int __initdata possible_cpus; 668 - 669 - void __init smp_setup_cpu_possible_map(void) 670 - { 671 - unsigned int pos_cpus, cpu; 672 - unsigned int configured_cpus, standby_cpus; 673 - 674 - smp_count_cpus(&configured_cpus, &standby_cpus); 675 - pos_cpus = min(configured_cpus + standby_cpus + 
additional_cpus, 676 - (unsigned int) NR_CPUS); 677 - if (possible_cpus) 678 - pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS); 679 - for (cpu = 0; cpu < pos_cpus; cpu++) 680 - cpu_set(cpu, cpu_possible_map); 681 - cpu_present_map = cpumask_of_cpu(0); 682 - smp_rescan_cpus(); 683 - } 684 - 685 - #ifdef CONFIG_HOTPLUG_CPU 686 - 687 - static int __init setup_additional_cpus(char *s) 688 - { 689 - additional_cpus = simple_strtoul(s, NULL, 0); 690 - return 0; 691 - } 692 - early_param("additional_cpus", setup_additional_cpus); 693 - 694 677 static int __init setup_possible_cpus(char *s) 695 678 { 696 - possible_cpus = simple_strtoul(s, NULL, 0); 679 + int pcpus, cpu; 680 + 681 + pcpus = simple_strtoul(s, NULL, 0); 682 + cpu_possible_map = cpumask_of_cpu(0); 683 + for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++) 684 + cpu_set(cpu, cpu_possible_map); 697 685 return 0; 698 686 } 699 687 early_param("possible_cpus", setup_possible_cpus); 688 + 689 + #ifdef CONFIG_HOTPLUG_CPU 700 690 701 691 int __cpu_disable(void) 702 692 { ··· 736 768 unsigned int cpu; 737 769 int i; 738 770 771 + smp_detect_cpus(); 772 + 739 773 /* request the 0x1201 emergency signal external interrupt */ 740 774 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 741 775 panic("Couldn't request external interrupt 0x1201"); ··· 786 816 { 787 817 BUG_ON(smp_processor_id() != 0); 788 818 819 + current_thread_info()->cpu = 0; 820 + cpu_set(0, cpu_present_map); 789 821 cpu_set(0, cpu_online_map); 790 822 S390_lowcore.percpu_offset = __per_cpu_offset[0]; 791 823 current_set[0] = current;
+2 -44
drivers/s390/char/sclp_cmd.c
··· 191 191 u8 reserved[4096 - 16]; 192 192 } __attribute__((packed, aligned(PAGE_SIZE))); 193 193 194 - static struct read_cpu_info_sccb __initdata early_read_cpu_info_sccb; 195 - static struct sclp_cpu_info __initdata sclp_cpu_info; 196 - 197 194 static void sclp_fill_cpu_info(struct sclp_cpu_info *info, 198 195 struct read_cpu_info_sccb *sccb) 199 196 { ··· 205 208 info->combined * sizeof(struct sclp_cpu_entry)); 206 209 } 207 210 208 - void __init sclp_read_cpu_info_early(void) 209 - { 210 - int rc; 211 - struct read_cpu_info_sccb *sccb; 212 - 213 - if (!SCLP_HAS_CPU_INFO) 214 - return; 215 - 216 - sccb = &early_read_cpu_info_sccb; 217 - do { 218 - memset(sccb, 0, sizeof(*sccb)); 219 - sccb->header.length = sizeof(*sccb); 220 - rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb); 221 - } while (rc == -EBUSY); 222 - 223 - if (rc) 224 - return; 225 - if (sccb->header.response_code != 0x10) 226 - return; 227 - sclp_fill_cpu_info(&sclp_cpu_info, sccb); 228 - } 229 - 230 - static int __init sclp_get_cpu_info_early(struct sclp_cpu_info *info) 231 - { 232 - if (!SCLP_HAS_CPU_INFO) 233 - return -EOPNOTSUPP; 234 - *info = sclp_cpu_info; 235 - return 0; 236 - } 237 - 238 - static int sclp_get_cpu_info_late(struct sclp_cpu_info *info) 211 + int sclp_get_cpu_info(struct sclp_cpu_info *info) 239 212 { 240 213 int rc; 241 214 struct read_cpu_info_sccb *sccb; 242 215 243 216 if (!SCLP_HAS_CPU_INFO) 244 217 return -EOPNOTSUPP; 245 - sccb = (struct read_cpu_info_sccb *) __get_free_page(GFP_KERNEL 246 - | GFP_DMA); 218 + sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 247 219 if (!sccb) 248 220 return -ENOMEM; 249 - memset(sccb, 0, sizeof(*sccb)); 250 221 sccb->header.length = sizeof(*sccb); 251 222 rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb); 252 223 if (rc) ··· 229 264 out: 230 265 free_page((unsigned long) sccb); 231 266 return rc; 232 - } 233 - 234 - int __init_refok sclp_get_cpu_info(struct sclp_cpu_info *info) 235 - { 236 - if (slab_is_available()) 237 - 
return sclp_get_cpu_info_late(info); 238 - return sclp_get_cpu_info_early(info); 239 267 } 240 268 241 269 struct cpu_configure_sccb {
-1
include/asm-s390/sclp.h
··· 46 46 int sclp_cpu_configure(u8 cpu); 47 47 int sclp_cpu_deconfigure(u8 cpu); 48 48 void sclp_read_info_early(void); 49 - void sclp_read_cpu_info_early(void); 50 49 void sclp_facilities_detect(void); 51 50 unsigned long long sclp_memory_detect(void); 52 51 int sclp_sdias_blk_count(void);
-3
include/asm-s390/smp.h
··· 35 35 extern void machine_halt_smp(void); 36 36 extern void machine_power_off_smp(void); 37 37 38 - extern void smp_setup_cpu_possible_map(void); 39 - 40 38 #define NO_PROC_ID 0xFF /* No processor magic marker */ 41 39 42 40 /* ··· 101 103 102 104 #define hard_smp_processor_id() 0 103 105 #define smp_cpu_not_running(cpu) 1 104 - #define smp_setup_cpu_possible_map() do { } while (0) 105 106 #endif 106 107 107 108 extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];