Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'irq/for-block' into irq/core

Add the new irq spreading infrastructure.

+295 -125
+1 -2
drivers/base/platform-msi.c
··· 142 142 } 143 143 144 144 for (i = 0; i < nvec; i++) { 145 - desc = alloc_msi_entry(dev); 145 + desc = alloc_msi_entry(dev, 1, NULL); 146 146 if (!desc) 147 147 break; 148 148 149 149 desc->platform.msi_priv_data = data; 150 150 desc->platform.msi_index = base + i; 151 - desc->nvec_used = 1; 152 151 desc->irq = virq ? virq + i : 0; 153 152 154 153 list_add_tail(&desc->list, dev_to_msi_list(dev));
+102 -57
drivers/pci/msi.c
··· 550 550 return ret; 551 551 } 552 552 553 - static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec) 553 + static struct msi_desc * 554 + msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity) 554 555 { 555 - u16 control; 556 + struct cpumask *masks = NULL; 556 557 struct msi_desc *entry; 558 + u16 control; 559 + 560 + if (affinity) { 561 + masks = irq_create_affinity_masks(dev->irq_affinity, nvec); 562 + if (!masks) 563 + pr_err("Unable to allocate affinity masks, ignoring\n"); 564 + } 557 565 558 566 /* MSI Entry Initialization */ 559 - entry = alloc_msi_entry(&dev->dev); 567 + entry = alloc_msi_entry(&dev->dev, nvec, masks); 560 568 if (!entry) 561 - return NULL; 569 + goto out; 562 570 563 571 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 564 572 ··· 577 569 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 578 570 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; 579 571 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); 580 - entry->nvec_used = nvec; 581 - entry->affinity = dev->irq_affinity; 582 572 583 573 if (control & PCI_MSI_FLAGS_64BIT) 584 574 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; ··· 587 581 if (entry->msi_attrib.maskbit) 588 582 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); 589 583 584 + out: 585 + kfree(masks); 590 586 return entry; 591 587 } 592 588 ··· 617 609 * an error, and a positive return value indicates the number of interrupts 618 610 * which could have been allocated. 
619 611 */ 620 - static int msi_capability_init(struct pci_dev *dev, int nvec) 612 + static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity) 621 613 { 622 614 struct msi_desc *entry; 623 615 int ret; ··· 625 617 626 618 pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ 627 619 628 - entry = msi_setup_entry(dev, nvec); 620 + entry = msi_setup_entry(dev, nvec, affinity); 629 621 if (!entry) 630 622 return -ENOMEM; 631 623 ··· 688 680 } 689 681 690 682 static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, 691 - struct msix_entry *entries, int nvec) 683 + struct msix_entry *entries, int nvec, 684 + bool affinity) 692 685 { 693 - const struct cpumask *mask = NULL; 686 + struct cpumask *curmsk, *masks = NULL; 694 687 struct msi_desc *entry; 695 - int cpu = -1, i; 688 + int ret, i; 696 689 697 - for (i = 0; i < nvec; i++) { 698 - if (dev->irq_affinity) { 699 - cpu = cpumask_next(cpu, dev->irq_affinity); 700 - if (cpu >= nr_cpu_ids) 701 - cpu = cpumask_first(dev->irq_affinity); 702 - mask = cpumask_of(cpu); 703 - } 690 + if (affinity) { 691 + masks = irq_create_affinity_masks(dev->irq_affinity, nvec); 692 + if (!masks) 693 + pr_err("Unable to allocate affinity masks, ignoring\n"); 694 + } 704 695 705 - entry = alloc_msi_entry(&dev->dev); 696 + for (i = 0, curmsk = masks; i < nvec; i++) { 697 + entry = alloc_msi_entry(&dev->dev, 1, curmsk); 706 698 if (!entry) { 707 699 if (!i) 708 700 iounmap(base); 709 701 else 710 702 free_msi_irqs(dev); 711 703 /* No enough memory. 
Don't try again */ 712 - return -ENOMEM; 704 + ret = -ENOMEM; 705 + goto out; 713 706 } 714 707 715 708 entry->msi_attrib.is_msix = 1; ··· 721 712 entry->msi_attrib.entry_nr = i; 722 713 entry->msi_attrib.default_irq = dev->irq; 723 714 entry->mask_base = base; 724 - entry->nvec_used = 1; 725 - entry->affinity = mask; 726 715 727 716 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); 717 + if (masks) 718 + curmsk++; 728 719 } 729 - 720 + ret = 0; 721 + out: 722 + kfree(masks); 730 723 return 0; 731 724 } 732 725 ··· 757 746 * single MSI-X irq. A return of zero indicates the successful setup of 758 747 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 759 748 **/ 760 - static int msix_capability_init(struct pci_dev *dev, 761 - struct msix_entry *entries, int nvec) 749 + static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, 750 + int nvec, bool affinity) 762 751 { 763 752 int ret; 764 753 u16 control; ··· 773 762 if (!base) 774 763 return -ENOMEM; 775 764 776 - ret = msix_setup_entries(dev, base, entries, nvec); 765 + ret = msix_setup_entries(dev, base, entries, nvec, affinity); 777 766 if (ret) 778 767 return ret; 779 768 ··· 953 942 } 954 943 EXPORT_SYMBOL(pci_msix_vec_count); 955 944 956 - /** 957 - * pci_enable_msix - configure device's MSI-X capability structure 958 - * @dev: pointer to the pci_dev data structure of MSI-X device function 959 - * @entries: pointer to an array of MSI-X entries (optional) 960 - * @nvec: number of MSI-X irqs requested for allocation by device driver 961 - * 962 - * Setup the MSI-X capability structure of device function with the number 963 - * of requested irqs upon its software driver call to request for 964 - * MSI-X mode enabled on its hardware device function. A return of zero 965 - * indicates the successful configuration of MSI-X capability structure 966 - * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 
967 - * Or a return of > 0 indicates that driver request is exceeding the number 968 - * of irqs or MSI-X vectors available. Driver should use the returned value to 969 - * re-send its request. 970 - **/ 971 - int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) 945 + static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, 946 + int nvec, bool affinity) 972 947 { 973 948 int nr_entries; 974 949 int i, j; ··· 986 989 dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); 987 990 return -EINVAL; 988 991 } 989 - return msix_capability_init(dev, entries, nvec); 992 + return msix_capability_init(dev, entries, nvec, affinity); 993 + } 994 + 995 + /** 996 + * pci_enable_msix - configure device's MSI-X capability structure 997 + * @dev: pointer to the pci_dev data structure of MSI-X device function 998 + * @entries: pointer to an array of MSI-X entries (optional) 999 + * @nvec: number of MSI-X irqs requested for allocation by device driver 1000 + * 1001 + * Setup the MSI-X capability structure of device function with the number 1002 + * of requested irqs upon its software driver call to request for 1003 + * MSI-X mode enabled on its hardware device function. A return of zero 1004 + * indicates the successful configuration of MSI-X capability structure 1005 + * with new allocated MSI-X irqs. A return of < 0 indicates a failure. 1006 + * Or a return of > 0 indicates that driver request is exceeding the number 1007 + * of irqs or MSI-X vectors available. Driver should use the returned value to 1008 + * re-send its request. 
1009 + **/ 1010 + int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) 1011 + { 1012 + return __pci_enable_msix(dev, entries, nvec, false); 990 1013 } 991 1014 EXPORT_SYMBOL(pci_enable_msix); 992 1015 ··· 1059 1042 static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, 1060 1043 unsigned int flags) 1061 1044 { 1045 + bool affinity = flags & PCI_IRQ_AFFINITY; 1062 1046 int nvec; 1063 1047 int rc; 1064 1048 ··· 1088 1070 nvec = maxvec; 1089 1071 1090 1072 for (;;) { 1091 - if (flags & PCI_IRQ_AFFINITY) { 1092 - dev->irq_affinity = irq_create_affinity_mask(&nvec); 1073 + if (affinity) { 1074 + nvec = irq_calc_affinity_vectors(dev->irq_affinity, 1075 + nvec); 1093 1076 if (nvec < minvec) 1094 1077 return -ENOSPC; 1095 1078 } 1096 1079 1097 - rc = msi_capability_init(dev, nvec); 1080 + rc = msi_capability_init(dev, nvec, affinity); 1098 1081 if (rc == 0) 1099 1082 return nvec; 1100 - 1101 - kfree(dev->irq_affinity); 1102 - dev->irq_affinity = NULL; 1103 1083 1104 1084 if (rc < 0) 1105 1085 return rc; ··· 1130 1114 struct msix_entry *entries, int minvec, int maxvec, 1131 1115 unsigned int flags) 1132 1116 { 1133 - int nvec = maxvec; 1134 - int rc; 1117 + bool affinity = flags & PCI_IRQ_AFFINITY; 1118 + int rc, nvec = maxvec; 1135 1119 1136 1120 if (maxvec < minvec) 1137 1121 return -ERANGE; 1138 1122 1139 1123 for (;;) { 1140 - if (flags & PCI_IRQ_AFFINITY) { 1141 - dev->irq_affinity = irq_create_affinity_mask(&nvec); 1124 + if (affinity) { 1125 + nvec = irq_calc_affinity_vectors(dev->irq_affinity, 1126 + nvec); 1142 1127 if (nvec < minvec) 1143 1128 return -ENOSPC; 1144 1129 } 1145 1130 1146 - rc = pci_enable_msix(dev, entries, nvec); 1131 + rc = __pci_enable_msix(dev, entries, nvec, affinity); 1147 1132 if (rc == 0) 1148 1133 return nvec; 1149 - 1150 - kfree(dev->irq_affinity); 1151 - dev->irq_affinity = NULL; 1152 1134 1153 1135 if (rc < 0) 1154 1136 return rc; ··· 1270 1256 return dev->irq + nr; 1271 1257 } 1272 1258 
EXPORT_SYMBOL(pci_irq_vector); 1259 + 1260 + /** 1261 + * pci_irq_get_affinity - return the affinity of a particular msi vector 1262 + * @dev: PCI device to operate on 1263 + * @nr: device-relative interrupt vector index (0-based). 1264 + */ 1265 + const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) 1266 + { 1267 + if (dev->msix_enabled) { 1268 + struct msi_desc *entry; 1269 + int i = 0; 1270 + 1271 + for_each_pci_msi_entry(entry, dev) { 1272 + if (i == nr) 1273 + return entry->affinity; 1274 + i++; 1275 + } 1276 + WARN_ON_ONCE(1); 1277 + return NULL; 1278 + } else if (dev->msi_enabled) { 1279 + struct msi_desc *entry = first_pci_msi_entry(dev); 1280 + 1281 + if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used)) 1282 + return NULL; 1283 + 1284 + return &entry->affinity[nr]; 1285 + } else { 1286 + return cpu_possible_mask; 1287 + } 1288 + } 1289 + EXPORT_SYMBOL(pci_irq_get_affinity); 1273 1290 1274 1291 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) 1275 1292 {
+1 -2
drivers/staging/fsl-mc/bus/mc-msi.c
··· 213 213 struct msi_desc *msi_desc; 214 214 215 215 for (i = 0; i < irq_count; i++) { 216 - msi_desc = alloc_msi_entry(dev); 216 + msi_desc = alloc_msi_entry(dev, 1, NULL); 217 217 if (!msi_desc) { 218 218 dev_err(dev, "Failed to allocate msi entry\n"); 219 219 error = -ENOMEM; ··· 221 221 } 222 222 223 223 msi_desc->fsl_mc.msi_index = i; 224 - msi_desc->nvec_used = 1; 225 224 INIT_LIST_HEAD(&msi_desc->list); 226 225 list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); 227 226 }
+11 -3
include/linux/interrupt.h
··· 278 278 extern int 279 279 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 280 280 281 - struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); 281 + struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec); 282 + int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec); 282 283 283 284 #else /* CONFIG_SMP */ 284 285 ··· 312 311 return 0; 313 312 } 314 313 315 - static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) 314 + static inline struct cpumask * 315 + irq_create_affinity_masks(const struct cpumask *affinity, int nvec) 316 316 { 317 - *nr_vecs = 1; 318 317 return NULL; 319 318 } 319 + 320 + static inline int 321 + irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec) 322 + { 323 + return maxvec; 324 + } 325 + 320 326 #endif /* CONFIG_SMP */ 321 327 322 328 /*
+3 -2
include/linux/msi.h
··· 68 68 unsigned int nvec_used; 69 69 struct device *dev; 70 70 struct msi_msg msg; 71 - const struct cpumask *affinity; 71 + struct cpumask *affinity; 72 72 73 73 union { 74 74 /* PCI MSI/X specific data */ ··· 123 123 } 124 124 #endif /* CONFIG_PCI_MSI */ 125 125 126 - struct msi_desc *alloc_msi_entry(struct device *dev); 126 + struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, 127 + const struct cpumask *affinity); 127 128 void free_msi_entry(struct msi_desc *entry); 128 129 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 129 130 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+6
include/linux/pci.h
··· 1300 1300 unsigned int max_vecs, unsigned int flags); 1301 1301 void pci_free_irq_vectors(struct pci_dev *dev); 1302 1302 int pci_irq_vector(struct pci_dev *dev, unsigned int nr); 1303 + const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); 1303 1304 1304 1305 #else 1305 1306 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } ··· 1342 1341 if (WARN_ON_ONCE(nr > 0)) 1343 1342 return -EINVAL; 1344 1343 return dev->irq; 1344 + } 1345 + static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, 1346 + int vec) 1347 + { 1348 + return cpu_possible_mask; 1345 1349 } 1346 1350 #endif 1347 1351
+132 -41
kernel/irq/affinity.c
··· 4 4 #include <linux/slab.h> 5 5 #include <linux/cpu.h> 6 6 7 - static int get_first_sibling(unsigned int cpu) 7 + static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, 8 + int cpus_per_vec) 8 9 { 9 - unsigned int ret; 10 + const struct cpumask *siblmsk; 11 + int cpu, sibl; 10 12 11 - ret = cpumask_first(topology_sibling_cpumask(cpu)); 12 - if (ret < nr_cpu_ids) 13 - return ret; 14 - return cpu; 13 + for ( ; cpus_per_vec > 0; ) { 14 + cpu = cpumask_first(nmsk); 15 + 16 + /* Should not happen, but I'm too lazy to think about it */ 17 + if (cpu >= nr_cpu_ids) 18 + return; 19 + 20 + cpumask_clear_cpu(cpu, nmsk); 21 + cpumask_set_cpu(cpu, irqmsk); 22 + cpus_per_vec--; 23 + 24 + /* If the cpu has siblings, use them first */ 25 + siblmsk = topology_sibling_cpumask(cpu); 26 + for (sibl = -1; cpus_per_vec > 0; ) { 27 + sibl = cpumask_next(sibl, siblmsk); 28 + if (sibl >= nr_cpu_ids) 29 + break; 30 + if (!cpumask_test_and_clear_cpu(sibl, nmsk)) 31 + continue; 32 + cpumask_set_cpu(sibl, irqmsk); 33 + cpus_per_vec--; 34 + } 35 + } 15 36 } 16 37 17 - /* 18 - * Take a map of online CPUs and the number of available interrupt vectors 19 - * and generate an output cpumask suitable for spreading MSI/MSI-X vectors 20 - * so that they are distributed as good as possible around the CPUs. If 21 - * more vectors than CPUs are available we'll map one to each CPU, 22 - * otherwise we map one to the first sibling of each socket. 23 - * 24 - * If there are more vectors than CPUs we will still only have one bit 25 - * set per CPU, but interrupt code will keep on assigning the vectors from 26 - * the start of the bitmap until we run out of vectors. 
27 - */ 28 - struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) 38 + static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk) 29 39 { 30 - struct cpumask *affinity_mask; 31 - unsigned int max_vecs = *nr_vecs; 40 + int n, nodes; 32 41 33 - if (max_vecs == 1) 34 - return NULL; 35 - 36 - affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL); 37 - if (!affinity_mask) { 38 - *nr_vecs = 1; 39 - return NULL; 42 + /* Calculate the number of nodes in the supplied affinity mask */ 43 + for (n = 0, nodes = 0; n < num_online_nodes(); n++) { 44 + if (cpumask_intersects(mask, cpumask_of_node(n))) { 45 + node_set(n, *nodemsk); 46 + nodes++; 47 + } 40 48 } 49 + return nodes; 50 + } 41 51 52 + /** 53 + * irq_create_affinity_masks - Create affinity masks for multiqueue spreading 54 + * @affinity: The affinity mask to spread. If NULL cpu_online_mask 55 + * is used 56 + * @nvecs: The number of vectors 57 + * 58 + * Returns the masks pointer or NULL if allocation failed. 59 + */ 60 + struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, 61 + int nvec) 62 + { 63 + int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0; 64 + nodemask_t nodemsk = NODE_MASK_NONE; 65 + struct cpumask *masks; 66 + cpumask_var_t nmsk; 67 + 68 + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) 69 + return NULL; 70 + 71 + masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL); 72 + if (!masks) 73 + goto out; 74 + 75 + /* Stabilize the cpumasks */ 42 76 get_online_cpus(); 43 - if (max_vecs >= num_online_cpus()) { 44 - cpumask_copy(affinity_mask, cpu_online_mask); 45 - *nr_vecs = num_online_cpus(); 46 - } else { 47 - unsigned int vecs = 0, cpu; 77 + /* If the supplied affinity mask is NULL, use cpu online mask */ 78 + if (!affinity) 79 + affinity = cpu_online_mask; 48 80 49 - for_each_online_cpu(cpu) { 50 - if (cpu == get_first_sibling(cpu)) { 51 - cpumask_set_cpu(cpu, affinity_mask); 52 - vecs++; 53 - } 81 + nodes = get_nodes_in_cpumask(affinity, 
&nodemsk); 54 82 55 - if (--max_vecs == 0) 83 + /* 84 + * If the number of nodes in the mask is less than or equal to the 85 + * number of vectors we just spread the vectors across the nodes. 86 + */ 87 + if (nvec <= nodes) { 88 + for_each_node_mask(n, nodemsk) { 89 + cpumask_copy(masks + curvec, cpumask_of_node(n)); 90 + if (++curvec == nvec) 56 91 break; 57 92 } 58 - *nr_vecs = vecs; 93 + goto outonl; 59 94 } 60 - put_online_cpus(); 61 95 62 - return affinity_mask; 96 + /* Spread the vectors per node */ 97 + vecs_per_node = nvec / nodes; 98 + /* Account for rounding errors */ 99 + extra_vecs = nvec - (nodes * vecs_per_node); 100 + 101 + for_each_node_mask(n, nodemsk) { 102 + int ncpus, v, vecs_to_assign = vecs_per_node; 103 + 104 + /* Get the cpus on this node which are in the mask */ 105 + cpumask_and(nmsk, affinity, cpumask_of_node(n)); 106 + 107 + /* Calculate the number of cpus per vector */ 108 + ncpus = cpumask_weight(nmsk); 109 + 110 + for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) { 111 + cpus_per_vec = ncpus / vecs_to_assign; 112 + 113 + /* Account for extra vectors to compensate rounding errors */ 114 + if (extra_vecs) { 115 + cpus_per_vec++; 116 + if (!--extra_vecs) 117 + vecs_per_node++; 118 + } 119 + irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); 120 + } 121 + 122 + if (curvec >= nvec) 123 + break; 124 + } 125 + 126 + outonl: 127 + put_online_cpus(); 128 + out: 129 + free_cpumask_var(nmsk); 130 + return masks; 131 + } 132 + 133 + /** 134 + * irq_calc_affinity_vectors - Calculate the optimal number of vectors for a given affinity mask 135 + * @affinity: The affinity mask to spread.
If NULL cpu_online_mask 136 + * is used 137 + * @maxvec: The maximum number of vectors available 138 + */ 139 + int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec) 140 + { 141 + int cpus, ret; 142 + 143 + /* Stabilize the cpumasks */ 144 + get_online_cpus(); 145 + /* If the supplied affinity mask is NULL, use cpu online mask */ 146 + if (!affinity) 147 + affinity = cpu_online_mask; 148 + 149 + cpus = cpumask_weight(affinity); 150 + ret = (cpus < maxvec) ? cpus : maxvec; 151 + 152 + put_online_cpus(); 153 + return ret; 63 154 }
+15 -16
kernel/irq/irqdesc.c
··· 424 424 const struct cpumask *mask = NULL; 425 425 struct irq_desc *desc; 426 426 unsigned int flags; 427 - int i, cpu = -1; 427 + int i; 428 428 429 - if (affinity && cpumask_empty(affinity)) 430 - return -EINVAL; 429 + /* Validate affinity mask(s) */ 430 + if (affinity) { 431 + for (i = 0, mask = affinity; i < cnt; i++, mask++) { 432 + if (cpumask_empty(mask)) 433 + return -EINVAL; 434 + } 435 + } 431 436 432 437 flags = affinity ? IRQD_AFFINITY_MANAGED : 0; 438 + mask = NULL; 433 439 434 440 for (i = 0; i < cnt; i++) { 435 441 if (affinity) { 436 - cpu = cpumask_next(cpu, affinity); 437 - if (cpu >= nr_cpu_ids) 438 - cpu = cpumask_first(affinity); 439 - node = cpu_to_node(cpu); 440 - 441 - /* 442 - * For single allocations we use the caller provided 443 - * mask otherwise we use the mask of the target cpu 444 - */ 445 - mask = cnt == 1 ? affinity : cpumask_of(cpu); 442 + node = cpu_to_node(cpumask_first(affinity)); 443 + mask = affinity; 444 + affinity++; 446 445 } 447 446 desc = alloc_desc(start + i, node, flags, mask, owner); 448 447 if (!desc) ··· 669 670 * @cnt: Number of consecutive irqs to allocate. 670 671 * @node: Preferred node on which the irq descriptor should be allocated 671 672 * @owner: Owning module (can be NULL) 672 - * @affinity: Optional pointer to an affinity mask which hints where the 673 - * irq descriptors should be allocated and which default 674 - * affinities to use 673 + * @affinity: Optional pointer to an affinity mask array of size @cnt which 674 + * hints where the irq descriptors should be allocated and which 675 + * default affinities to use 675 676 * 676 677 * Returns the first irq number or error code 677 678 */
+24 -2
kernel/irq/msi.c
··· 18 18 /* Temparory solution for building, will be removed later */ 19 19 #include <linux/pci.h> 20 20 21 - struct msi_desc *alloc_msi_entry(struct device *dev) 21 + /** 22 + * alloc_msi_entry - Allocate and initialize msi_entry 23 + * @dev: Pointer to the device for which this is allocated 24 + * @nvec: The number of vectors used in this entry 25 + * @affinity: Optional pointer to an affinity mask array of size @nvec 26 + * 27 + * If @affinity is not NULL then an affinity array[@nvec] is allocated 28 + * and the affinity masks from @affinity are copied. 29 + */ 30 + struct msi_desc * 31 + alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity) 22 32 { 23 - struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); 33 + struct msi_desc *desc; 34 + 35 + desc = kzalloc(sizeof(*desc), GFP_KERNEL); 24 36 if (!desc) 25 37 return NULL; 26 38 27 39 INIT_LIST_HEAD(&desc->list); 28 40 desc->dev = dev; 41 + desc->nvec_used = nvec; 42 + if (affinity) { 43 + desc->affinity = kmemdup(affinity, 44 + nvec * sizeof(*desc->affinity), GFP_KERNEL); 45 + if (!desc->affinity) { 46 + kfree(desc); 47 + return NULL; 48 + } 49 + } 29 50 30 51 return desc; 31 52 } 32 53 33 54 void free_msi_entry(struct msi_desc *entry) 34 55 { 56 + kfree(entry->affinity); 35 57 kfree(entry); 36 58 }