Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/hpet: Move clockevents into channels

Instead of allocating yet another data structure, move the clock event data
into the channel structure. This allows further consolidation of the
reservation code and the reuse of the cached boot config to replace the
extra flags in the clockevent data.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Ravi Shankar <ravi.v.shankar@intel.com>
Link: https://lkml.kernel.org/r/20190623132436.185851116@linutronix.de

+65 -86
+3 -3
arch/x86/include/asm/hpet.h
··· 75 75 extern void force_hpet_resume(void); 76 76 77 77 struct irq_data; 78 - struct hpet_dev; 78 + struct hpet_channel; 79 79 struct irq_domain; 80 80 81 81 extern void hpet_msi_unmask(struct irq_data *data); 82 82 extern void hpet_msi_mask(struct irq_data *data); 83 - extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); 83 + extern void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg); 84 84 extern struct irq_domain *hpet_create_irq_domain(int hpet_id); 85 85 extern int hpet_assign_irq(struct irq_domain *domain, 86 - struct hpet_dev *dev, int dev_num); 86 + struct hpet_channel *hc, int dev_num); 87 87 88 88 #ifdef CONFIG_HPET_EMULATE_RTC 89 89
+2 -2
arch/x86/kernel/apic/msi.c
··· 370 370 return d; 371 371 } 372 372 373 - int hpet_assign_irq(struct irq_domain *domain, struct hpet_dev *dev, 373 + int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc, 374 374 int dev_num) 375 375 { 376 376 struct irq_alloc_info info; 377 377 378 378 init_irq_alloc_info(&info, NULL); 379 379 info.type = X86_IRQ_ALLOC_TYPE_HPET; 380 - info.hpet_data = dev; 380 + info.hpet_data = hc; 381 381 info.hpet_id = hpet_dev_id(domain); 382 382 info.hpet_index = dev_num; 383 383
+60 -81
arch/x86/kernel/hpet.c
··· 13 13 #undef pr_fmt 14 14 #define pr_fmt(fmt) "hpet: " fmt 15 15 16 - struct hpet_dev { 17 - struct clock_event_device evt; 18 - unsigned int num; 19 - int cpu; 20 - unsigned int irq; 21 - unsigned int flags; 22 - char name[10]; 23 - }; 24 - 25 16 enum hpet_mode { 26 17 HPET_MODE_UNUSED, 27 18 HPET_MODE_LEGACY,
··· 21 30 }; 22 31 23 32 struct hpet_channel { 33 + struct clock_event_device evt; 24 34 unsigned int num; 35 + unsigned int cpu; 25 36 unsigned int irq; 26 37 enum hpet_mode mode; 38 + unsigned int flags; 27 39 unsigned int boot_cfg; 40 + char name[10]; 28 41 }; 29 42 30 43 struct hpet_base { 31 44 unsigned int nr_channels; 45 + unsigned int nr_clockevents; 32 46 unsigned int boot_cfg; 33 47 struct hpet_channel *channels; 34 48 };
··· 57 61 bool hpet_msi_disable; 58 62 59 63 #ifdef CONFIG_PCI_MSI 60 - static struct hpet_dev *hpet_devs; 61 - static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); 64 + static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel); 62 65 static struct irq_domain *hpet_domain; 63 66 #endif 64 67
··· 74 79 static struct clock_event_device hpet_clockevent; 75 80 76 81 static inline 77 - struct hpet_dev *clockevent_to_channel(struct clock_event_device *evt) 82 + struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt) 78 83 { 79 - return container_of(evt, struct hpet_dev, evt); 84 + return container_of(evt, struct hpet_channel, evt); 80 85 } 81 86 82 87 inline unsigned int hpet_readl(unsigned int a)
··· 455 460 456 461 void hpet_msi_unmask(struct irq_data *data) 457 462 { 458 - struct hpet_dev *hc = irq_data_get_irq_handler_data(data); 463 + struct hpet_channel *hc = irq_data_get_irq_handler_data(data); 459 464 unsigned int cfg; 460 465 461 - /* unmask it */ 462 466 cfg = hpet_readl(HPET_Tn_CFG(hc->num)); 463 467 cfg |= HPET_TN_ENABLE | HPET_TN_FSB; 464 468 hpet_writel(cfg, HPET_Tn_CFG(hc->num));
··· 465 471 466 472 void hpet_msi_mask(struct irq_data *data) 467 473 { 468 - struct hpet_dev *hc = irq_data_get_irq_handler_data(data); 474 + struct hpet_channel *hc = irq_data_get_irq_handler_data(data); 469 475 unsigned int cfg; 470 476 471 - /* mask it */ 472 477 cfg = hpet_readl(HPET_Tn_CFG(hc->num)); 473 478 cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB); 474 479 hpet_writel(cfg, HPET_Tn_CFG(hc->num)); 475 480 } 476 481 477 - void hpet_msi_write(struct hpet_dev *hc, struct msi_msg *msg) 482 + void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg) 478 483 { 479 484 hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num)); 480 485 hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4);
··· 496 503 497 504 static int hpet_msi_resume(struct clock_event_device *evt) 498 505 { 499 - struct hpet_dev *hc = clockevent_to_channel(evt); 506 + struct hpet_channel *hc = clockevent_to_channel(evt); 500 507 struct irq_data *data = irq_get_irq_data(hc->irq); 501 508 struct msi_msg msg; 502 509
··· 515 522 516 523 static irqreturn_t hpet_interrupt_handler(int irq, void *data) 517 524 { 518 - struct hpet_dev *hc = data; 525 + struct hpet_channel *hc = data; 519 526 struct clock_event_device *evt = &hc->evt; 520 527 521 528 if (!evt->event_handler) {
··· 527 534 return IRQ_HANDLED; 528 535 } 529 536 530 - static int hpet_setup_irq(struct hpet_dev *dev) 537 + static int hpet_setup_irq(struct hpet_channel *hc) 531 538 { 532 - 533 - if (request_irq(dev->irq, hpet_interrupt_handler, 539 + if (request_irq(hc->irq, hpet_interrupt_handler, 534 540 IRQF_TIMER | IRQF_NOBALANCING, 535 - dev->name, dev)) 541 + hc->name, hc)) 536 542 return -1; 537 543 538 - disable_irq(dev->irq); 539 - irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); 540 - enable_irq(dev->irq); 544 + disable_irq(hc->irq); 545 + irq_set_affinity(hc->irq, cpumask_of(hc->cpu)); 546 + enable_irq(hc->irq); 541 547 542 - pr_debug("%s irq %d for MSI\n", dev->name, dev->irq); 548 + pr_debug("%s irq %u for MSI\n", hc->name, hc->irq); 543 549 544 550 return 0; 545 551 } 546 552 547 - static void init_one_hpet_msi_clockevent(struct hpet_dev *hc, int cpu) 553 + static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu) 548 554 { 549 555 struct clock_event_device *evt = &hc->evt;
··· 551 559 return; 552 560 553 561 hc->cpu = cpu; 554 - per_cpu(cpu_hpet_dev, cpu) = hc; 562 + per_cpu(cpu_hpet_channel, cpu) = hc; 555 563 evt->name = hc->name; 556 564 hpet_setup_irq(hc); 557 565 evt->irq = hc->irq;
··· 573 581 0x7FFFFFFF); 574 582 } 575 583 576 - static struct hpet_dev *hpet_get_unused_timer(void) 584 + static struct hpet_channel *hpet_get_unused_clockevent(void) 577 585 { 578 586 int i; 579 587 580 - if (!hpet_devs) 581 - return NULL; 582 - 583 588 for (i = 0; i < hpet_base.nr_channels; i++) { 584 - struct hpet_dev *hc = &hpet_devs[i]; 589 + struct hpet_channel *hc = hpet_base.channels + i; 585 590 586 591 if (!(hc->flags & HPET_DEV_VALID)) 587 592 continue;
··· 592 603 593 604 static int hpet_cpuhp_online(unsigned int cpu) 594 605 { 595 - struct hpet_dev *hc = hpet_get_unused_timer(); 606 + struct hpet_channel *hc = hpet_get_unused_clockevent(); 596 607 597 608 if (hc) 598 609 init_one_hpet_msi_clockevent(hc, cpu);
··· 601 612 602 613 static int hpet_cpuhp_dead(unsigned int cpu) 603 614 { 604 - struct hpet_dev *hc = per_cpu(cpu_hpet_dev, cpu); 615 + struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu); 605 616 606 617 if (!hc) 607 618 return 0; 608 619 free_irq(hc->irq, hc); 609 620 hc->flags &= ~HPET_DEV_USED; 610 - per_cpu(cpu_hpet_dev, cpu) = NULL; 621 + per_cpu(cpu_hpet_channel, cpu) = NULL; 611 622 return 0; 612 623 } 613 624 614 - #ifdef CONFIG_HPET 615 - /* Reserve at least one timer for userspace (/dev/hpet) */ 616 - #define RESERVE_TIMERS 1 617 - #else 618 - #define RESERVE_TIMERS 0 619 - #endif 620 - 621 - static void __init hpet_msi_capability_lookup(unsigned int start_timer) 625 + static void __init hpet_select_clockevents(void) 622 626 { 623 - unsigned int num_timers; 624 - unsigned int num_timers_used = 0; 625 - int i, irq; 627 + unsigned int i; 626 628
627 - if (hpet_msi_disable) 629 + hpet_base.nr_clockevents = 0; 630 + 631 + /* No point if MSI is disabled or CPU has an Always Running APIC Timer */ 632 + if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT)) 628 633 return; 629 634 630 - if (boot_cpu_has(X86_FEATURE_ARAT)) 631 - return; 632 - 633 - num_timers = hpet_base.nr_channels; 634 635 hpet_print_config(); 635 636 636 637 hpet_domain = hpet_create_irq_domain(hpet_blockid); 637 638 if (!hpet_domain) 638 639 return; 639 640 640 - hpet_devs = kcalloc(num_timers, sizeof(struct hpet_dev), GFP_KERNEL); 641 - if (!hpet_devs) 642 - return; 641 + for (i = 0; i < hpet_base.nr_channels; i++) { 642 + struct hpet_channel *hc = hpet_base.channels + i; 643 + int irq; 643 644 644 - for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) { 645 - struct hpet_dev *hc = &hpet_devs[num_timers_used]; 646 - unsigned int cfg = hpet_base.channels[i].boot_cfg; 645 + if (hc->mode != HPET_MODE_UNUSED) 646 + continue; 647 647 648 - /* Only consider HPET timer with MSI support */ 649 - if (!(cfg & HPET_TN_FSB_CAP)) 648 + /* Only consider HPET channel with MSI support */ 649 + if (!(hc->boot_cfg & HPET_TN_FSB_CAP)) 650 650 continue; 651 651 652 652 hc->flags = 0; 653 - if (cfg & HPET_TN_PERIODIC_CAP) 653 + if (hc->boot_cfg & HPET_TN_PERIODIC_CAP) 654 654 hc->flags |= HPET_DEV_PERI_CAP; 655 655 sprintf(hc->name, "hpet%d", i); 656 - hc->num = i; 657 656 658 657 irq = hpet_assign_irq(hpet_domain, hc, hc->num); 659 658 if (irq <= 0)
··· 650 673 hc->irq = irq; 651 674 hc->flags |= HPET_DEV_FSB_CAP; 652 675 hc->flags |= HPET_DEV_VALID; 653 - num_timers_used++; 654 - if (num_timers_used == num_possible_cpus()) 676 + hc->mode = HPET_MODE_CLOCKEVT; 677 + 678 + if (++hpet_base.nr_clockevents == num_possible_cpus()) 655 679 break; 656 680 } 657 681 658 682 pr_info("%d channels of %d reserved for per-cpu timers\n", 659 - num_timers, num_timers_used); 683 + hpet_base.nr_channels, hpet_base.nr_clockevents); 660 684 } 661 685 662 686 #ifdef CONFIG_HPET
··· 665 687 { 666 688 int i; 667 689 668 - if (!hpet_devs) 669 - return; 670 - 671 690 for (i = 0; i < hpet_base.nr_channels; i++) { 672 - struct hpet_dev *hc = &hpet_devs[i]; 691 + struct hpet_channel *hc = hpet_base.channels + i; 673 692 674 693 if (!(hc->flags & HPET_DEV_VALID)) 675 694 continue;
··· 679 704 680 705 #else 681 706 682 - static inline void hpet_msi_capability_lookup(unsigned int start_timer) { } 707 + static inline void hpet_select_clockevents(void) { } 683 708 684 709 #ifdef CONFIG_HPET 685 710 static inline void hpet_reserve_msi_timers(struct hpet_data *hd) { }
··· 966 991 /* 967 992 * The late initialization runs after the PCI quirks have been invoked 968 993 * which might have detected a system on which the HPET can be enforced. 994 + * 995 + * Also, the MSI machinery is not working yet when the HPET is initialized 996 + * early. 997 + * 998 + * If the HPET is enabled, then: 999 + * 1000 + * 1) Reserve one channel for /dev/hpet if CONFIG_HPET=y 1001 + * 2) Reserve up to num_possible_cpus() channels as per CPU clockevents 1002 + * 3) Setup /dev/hpet if CONFIG_HPET=y 1003 + * 4) Register hotplug callbacks when clockevents are available 969 1004 */ 970 1005 static __init int hpet_late_init(void) 971 1006 {
··· 992 1007 if (!hpet_virt_address) 993 1008 return -ENODEV; 994 1009 995 - if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP) 996 - hpet_msi_capability_lookup(2); 997 - else 998 - hpet_msi_capability_lookup(0); 999 - 1010 + hpet_select_device_channel(); 1011 + hpet_select_clockevents(); 1000 1012 hpet_reserve_platform_timers(); 1001 1013 hpet_print_config(); 1002 1014 1003 - if (hpet_msi_disable) 1004 - return 0; 1005 - 1006 - if (boot_cpu_has(X86_FEATURE_ARAT)) 1015 + if (!hpet_base.nr_clockevents) 1007 1016 return 0; 1008 1017 1009 1018 ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",