Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/events: Refactor evtchn_to_irq array to be dynamically allocated

Refactor the static evtchn_to_irq array to be dynamically allocated, by
implementing get and set functions for accesses to the array.

Two new port ops are added: max_channels (maximum supported number of
event channels) and nr_channels (number of currently usable event
channels). For the 2-level ABI, these numbers are both the same as
the shared data structure is a fixed size. For the FIFO ABI, these
will be different as the event array is expanded dynamically.

This allows more than 65000 event channels, so an unsigned short is no
longer sufficient for an event channel port number; unsigned int is
used instead.

Signed-off-by: Malcolm Crossley <malcolm.crossley@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

Authored by David Vrabel;
committed by Konrad Rzeszutek Wilk.
d0b075ff 08385875

+149 -55
+9 -2
drivers/xen/events/events_2l.c
··· 41 41 static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD], 42 42 cpu_evtchn_mask); 43 43 44 + static unsigned evtchn_2l_max_channels(void) 45 + { 46 + return NR_EVENT_CHANNELS; 47 + } 48 + 44 49 static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu) 45 50 { 46 51 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); ··· 243 238 244 239 /* Process port. */ 245 240 port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx; 246 - irq = evtchn_to_irq[port]; 241 + irq = get_evtchn_to_irq(port); 247 242 248 243 if (irq != -1) { 249 244 desc = irq_to_desc(irq); ··· 337 332 int word_idx = i / BITS_PER_EVTCHN_WORD; 338 333 printk(" %d: event %d -> irq %d%s%s%s\n", 339 334 cpu_from_evtchn(i), i, 340 - evtchn_to_irq[i], 335 + get_evtchn_to_irq(i), 341 336 sync_test_bit(word_idx, BM(&v->evtchn_pending_sel)) 342 337 ? "" : " l2-clear", 343 338 !sync_test_bit(i, BM(sh->evtchn_mask)) ··· 353 348 } 354 349 355 350 static const struct evtchn_ops evtchn_ops_2l = { 351 + .max_channels = evtchn_2l_max_channels, 352 + .nr_channels = evtchn_2l_max_channels, 356 353 .bind_to_cpu = evtchn_2l_bind_to_cpu, 357 354 .clear_pending = evtchn_2l_clear_pending, 358 355 .set_pending = evtchn_2l_set_pending,
+124 -51
drivers/xen/events/events_base.c
··· 77 77 /* IRQ <-> IPI mapping */ 78 78 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; 79 79 80 - int *evtchn_to_irq; 80 + int **evtchn_to_irq; 81 81 #ifdef CONFIG_X86 82 82 static unsigned long *pirq_eoi_map; 83 83 #endif 84 84 static bool (*pirq_needs_eoi)(unsigned irq); 85 + 86 + #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq))) 87 + #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq))) 88 + #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq)) 85 89 86 90 /* Xen will never allocate port zero for any purpose. */ 87 91 #define VALID_EVTCHN(chn) ((chn) != 0) ··· 95 91 static struct irq_chip xen_pirq_chip; 96 92 static void enable_dynirq(struct irq_data *data); 97 93 static void disable_dynirq(struct irq_data *data); 94 + 95 + static void clear_evtchn_to_irq_row(unsigned row) 96 + { 97 + unsigned col; 98 + 99 + for (col = 0; col < EVTCHN_PER_ROW; col++) 100 + evtchn_to_irq[row][col] = -1; 101 + } 102 + 103 + static void clear_evtchn_to_irq_all(void) 104 + { 105 + unsigned row; 106 + 107 + for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { 108 + if (evtchn_to_irq[row] == NULL) 109 + continue; 110 + clear_evtchn_to_irq_row(row); 111 + } 112 + } 113 + 114 + static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) 115 + { 116 + unsigned row; 117 + unsigned col; 118 + 119 + if (evtchn >= xen_evtchn_max_channels()) 120 + return -EINVAL; 121 + 122 + row = EVTCHN_ROW(evtchn); 123 + col = EVTCHN_COL(evtchn); 124 + 125 + if (evtchn_to_irq[row] == NULL) { 126 + /* Unallocated irq entries return -1 anyway */ 127 + if (irq == -1) 128 + return 0; 129 + 130 + evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); 131 + if (evtchn_to_irq[row] == NULL) 132 + return -ENOMEM; 133 + 134 + clear_evtchn_to_irq_row(row); 135 + } 136 + 137 + evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq; 138 + return 0; 139 + } 140 + 141 + int get_evtchn_to_irq(unsigned evtchn) 142 + { 143 + if 
(evtchn >= xen_evtchn_max_channels()) 144 + return -1; 145 + if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) 146 + return -1; 147 + return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; 148 + } 98 149 99 150 /* Get info for IRQ */ 100 151 struct irq_info *info_for_irq(unsigned irq) ··· 161 102 static int xen_irq_info_common_setup(struct irq_info *info, 162 103 unsigned irq, 163 104 enum xen_irq_type type, 164 - unsigned short evtchn, 105 + unsigned evtchn, 165 106 unsigned short cpu) 166 107 { 108 + int ret; 167 109 168 110 BUG_ON(info->type != IRQT_UNBOUND && info->type != type); 169 111 ··· 173 113 info->evtchn = evtchn; 174 114 info->cpu = cpu; 175 115 176 - evtchn_to_irq[evtchn] = irq; 116 + ret = set_evtchn_to_irq(evtchn, irq); 117 + if (ret < 0) 118 + return ret; 177 119 178 120 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); 179 121 ··· 183 121 } 184 122 185 123 static int xen_irq_info_evtchn_setup(unsigned irq, 186 - unsigned short evtchn) 124 + unsigned evtchn) 187 125 { 188 126 struct irq_info *info = info_for_irq(irq); 189 127 ··· 192 130 193 131 static int xen_irq_info_ipi_setup(unsigned cpu, 194 132 unsigned irq, 195 - unsigned short evtchn, 133 + unsigned evtchn, 196 134 enum ipi_vector ipi) 197 135 { 198 136 struct irq_info *info = info_for_irq(irq); ··· 206 144 207 145 static int xen_irq_info_virq_setup(unsigned cpu, 208 146 unsigned irq, 209 - unsigned short evtchn, 210 - unsigned short virq) 147 + unsigned evtchn, 148 + unsigned virq) 211 149 { 212 150 struct irq_info *info = info_for_irq(irq); 213 151 ··· 219 157 } 220 158 221 159 static int xen_irq_info_pirq_setup(unsigned irq, 222 - unsigned short evtchn, 223 - unsigned short pirq, 224 - unsigned short gsi, 160 + unsigned evtchn, 161 + unsigned pirq, 162 + unsigned gsi, 225 163 uint16_t domid, 226 164 unsigned char flags) 227 165 { ··· 233 171 info->u.pirq.flags = flags; 234 172 235 173 return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0); 174 + } 175 + 176 + static 
void xen_irq_info_cleanup(struct irq_info *info) 177 + { 178 + set_evtchn_to_irq(info->evtchn, -1); 179 + info->evtchn = 0; 236 180 } 237 181 238 182 /* ··· 254 186 255 187 unsigned irq_from_evtchn(unsigned int evtchn) 256 188 { 257 - return evtchn_to_irq[evtchn]; 189 + return get_evtchn_to_irq(evtchn); 258 190 } 259 191 EXPORT_SYMBOL_GPL(irq_from_evtchn); 260 192 ··· 305 237 306 238 unsigned int cpu_from_evtchn(unsigned int evtchn) 307 239 { 308 - int irq = evtchn_to_irq[evtchn]; 240 + int irq = get_evtchn_to_irq(evtchn); 309 241 unsigned ret = 0; 310 242 311 243 if (irq != -1) ··· 331 263 332 264 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) 333 265 { 334 - int irq = evtchn_to_irq[chn]; 266 + int irq = get_evtchn_to_irq(chn); 335 267 struct irq_info *info = info_for_irq(irq); 336 268 337 269 BUG_ON(irq == -1); ··· 454 386 irq_free_desc(irq); 455 387 } 456 388 389 + static void xen_evtchn_close(unsigned int port) 390 + { 391 + struct evtchn_close close; 392 + 393 + close.port = port; 394 + if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 395 + BUG(); 396 + 397 + /* Closed ports are implicitly re-bound to VCPU0. 
*/ 398 + bind_evtchn_to_cpu(port, 0); 399 + } 400 + 457 401 static void pirq_query_unmask(int irq) 458 402 { 459 403 struct physdev_irq_status_query irq_status; ··· 538 458 539 459 pirq_query_unmask(irq); 540 460 541 - evtchn_to_irq[evtchn] = irq; 461 + rc = set_evtchn_to_irq(evtchn, irq); 462 + if (rc != 0) { 463 + pr_err("irq%d: Failed to set port to irq mapping (%d)\n", 464 + irq, rc); 465 + xen_evtchn_close(evtchn); 466 + return 0; 467 + } 542 468 bind_evtchn_to_cpu(evtchn, 0); 543 469 info->evtchn = evtchn; 544 470 ··· 562 476 563 477 static void shutdown_pirq(struct irq_data *data) 564 478 { 565 - struct evtchn_close close; 566 479 unsigned int irq = data->irq; 567 480 struct irq_info *info = info_for_irq(irq); 568 - int evtchn = evtchn_from_irq(irq); 481 + unsigned evtchn = evtchn_from_irq(irq); 569 482 570 483 BUG_ON(info->type != IRQT_PIRQ); 571 484 ··· 572 487 return; 573 488 574 489 mask_evtchn(evtchn); 575 - 576 - close.port = evtchn; 577 - if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 578 - BUG(); 579 - 580 - bind_evtchn_to_cpu(evtchn, 0); 581 - evtchn_to_irq[evtchn] = -1; 582 - info->evtchn = 0; 490 + xen_evtchn_close(evtchn); 491 + xen_irq_info_cleanup(info); 583 492 } 584 493 585 494 static void enable_pirq(struct irq_data *data) ··· 604 525 605 526 static void __unbind_from_irq(unsigned int irq) 606 527 { 607 - struct evtchn_close close; 608 528 int evtchn = evtchn_from_irq(irq); 609 529 struct irq_info *info = irq_get_handler_data(irq); 610 530 ··· 614 536 } 615 537 616 538 if (VALID_EVTCHN(evtchn)) { 617 - close.port = evtchn; 618 - if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 619 - BUG(); 539 + unsigned int cpu = cpu_from_irq(irq); 540 + 541 + xen_evtchn_close(evtchn); 620 542 621 543 switch (type_from_irq(irq)) { 622 544 case IRQT_VIRQ: 623 - per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) 624 - [virq_from_irq(irq)] = -1; 545 + per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; 625 546 break; 626 547 case 
IRQT_IPI: 627 - per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) 628 - [ipi_from_irq(irq)] = -1; 548 + per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; 629 549 break; 630 550 default: 631 551 break; 632 552 } 633 553 634 - /* Closed ports are implicitly re-bound to VCPU0. */ 635 - bind_evtchn_to_cpu(evtchn, 0); 636 - 637 - evtchn_to_irq[evtchn] = -1; 554 + xen_irq_info_cleanup(info); 638 555 } 639 556 640 557 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND); ··· 833 760 int irq; 834 761 int ret; 835 762 763 + if (evtchn >= xen_evtchn_max_channels()) 764 + return -ENOMEM; 765 + 836 766 mutex_lock(&irq_mapping_update_lock); 837 767 838 - irq = evtchn_to_irq[evtchn]; 768 + irq = get_evtchn_to_irq(evtchn); 839 769 840 770 if (irq == -1) { 841 771 irq = xen_allocate_irq_dynamic(); ··· 928 852 int port, rc = -ENOENT; 929 853 930 854 memset(&status, 0, sizeof(status)); 931 - for (port = 0; port <= NR_EVENT_CHANNELS; port++) { 855 + for (port = 0; port < xen_evtchn_max_channels(); port++) { 932 856 status.dom = DOMID_SELF; 933 857 status.port = port; 934 858 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); ··· 1098 1022 1099 1023 int evtchn_make_refcounted(unsigned int evtchn) 1100 1024 { 1101 - int irq = evtchn_to_irq[evtchn]; 1025 + int irq = get_evtchn_to_irq(evtchn); 1102 1026 struct irq_info *info; 1103 1027 1104 1028 if (irq == -1) ··· 1123 1047 struct irq_info *info; 1124 1048 int err = -ENOENT; 1125 1049 1126 - if (evtchn >= NR_EVENT_CHANNELS) 1050 + if (evtchn >= xen_evtchn_max_channels()) 1127 1051 return -EINVAL; 1128 1052 1129 1053 mutex_lock(&irq_mapping_update_lock); 1130 1054 1131 - irq = evtchn_to_irq[evtchn]; 1055 + irq = get_evtchn_to_irq(evtchn); 1132 1056 if (irq == -1) 1133 1057 goto done; 1134 1058 ··· 1152 1076 1153 1077 void evtchn_put(unsigned int evtchn) 1154 1078 { 1155 - int irq = evtchn_to_irq[evtchn]; 1079 + int irq = get_evtchn_to_irq(evtchn); 1156 1080 if (WARN_ON(irq == -1)) 1157 1081 return; 1158 1082 unbind_from_irq(irq); ··· 1239 
1163 mutex_lock(&irq_mapping_update_lock); 1240 1164 1241 1165 /* After resume the irq<->evtchn mappings are all cleared out */ 1242 - BUG_ON(evtchn_to_irq[evtchn] != -1); 1166 + BUG_ON(get_evtchn_to_irq(evtchn) != -1); 1243 1167 /* Expect irq to have been bound before, 1244 1168 so there should be a proper type */ 1245 1169 BUG_ON(info->type == IRQT_UNBOUND); ··· 1524 1448 struct irq_info *info; 1525 1449 1526 1450 /* New event-channel space is not 'live' yet. */ 1527 - for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 1451 + for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++) 1528 1452 mask_evtchn(evtchn); 1529 1453 1530 1454 /* No IRQ <-> event-channel mappings. */ 1531 1455 list_for_each_entry(info, &xen_irq_list_head, list) 1532 1456 info->evtchn = 0; /* zap event-channel binding */ 1533 1457 1534 - for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 1535 - evtchn_to_irq[evtchn] = -1; 1458 + clear_evtchn_to_irq_all(); 1536 1459 1537 1460 for_each_possible_cpu(cpu) { 1538 1461 restore_cpu_virqs(cpu); ··· 1628 1553 1629 1554 xen_evtchn_2l_init(); 1630 1555 1631 - evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq), 1632 - GFP_KERNEL); 1556 + evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), 1557 + sizeof(*evtchn_to_irq), GFP_KERNEL); 1633 1558 BUG_ON(!evtchn_to_irq); 1634 - for (i = 0; i < NR_EVENT_CHANNELS; i++) 1635 - evtchn_to_irq[i] = -1; 1636 1559 1637 1560 /* No event channels are 'live' right now. */ 1638 - for (i = 0; i < NR_EVENT_CHANNELS; i++) 1561 + for (i = 0; i < xen_evtchn_nr_channels(); i++) 1639 1562 mask_evtchn(i); 1640 1563 1641 1564 pirq_needs_eoi = pirq_needs_eoi_flag;
+16 -2
drivers/xen/events/events_internal.h
··· 35 35 int refcnt; 36 36 enum xen_irq_type type; /* type */ 37 37 unsigned irq; 38 - unsigned short evtchn; /* event channel */ 38 + unsigned int evtchn; /* event channel */ 39 39 unsigned short cpu; /* cpu bound */ 40 40 41 41 union { ··· 55 55 #define PIRQ_SHAREABLE (1 << 1) 56 56 57 57 struct evtchn_ops { 58 + unsigned (*max_channels)(void); 59 + unsigned (*nr_channels)(void); 60 + 58 61 int (*setup)(struct irq_info *info); 59 62 void (*bind_to_cpu)(struct irq_info *info, unsigned cpu); 60 63 ··· 73 70 74 71 extern const struct evtchn_ops *evtchn_ops; 75 72 76 - extern int *evtchn_to_irq; 73 + extern int **evtchn_to_irq; 74 + int get_evtchn_to_irq(unsigned int evtchn); 77 75 78 76 struct irq_info *info_for_irq(unsigned irq); 79 77 unsigned cpu_from_irq(unsigned irq); 80 78 unsigned cpu_from_evtchn(unsigned int evtchn); 79 + 80 + static inline unsigned xen_evtchn_max_channels(void) 81 + { 82 + return evtchn_ops->max_channels(); 83 + } 84 + 85 + static inline unsigned xen_evtchn_nr_channels(void) 86 + { 87 + return evtchn_ops->nr_channels(); 88 + } 81 89 82 90 /* 83 91 * Do any ABI specific setup for a bound event channel before it can