Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: idxd: embed irq_entry in idxd_wq struct

With irq_entry already being associated with the wq in a 1:1 relationship,
embed the irq_entry in the idxd_wq struct and remove back pointers for
idxd_wq and idxd_device. In the process of this work, clean up the interrupt
handle assignment so that there's no decision to be made during submit
call on where interrupt handle value comes from. Set the interrupt handle
during irq request initialization time.

irq_entry 0 is designated as special and is tied to the device itself.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/163942148362.2412839.12055447853311267866.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Dave Jiang and committed by
Vinod Koul
ec0d6423 26e9baa8

+79 -99
+8 -10
drivers/dma/idxd/device.c
··· 21 21 /* Interrupt control bits */ 22 22 void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id) 23 23 { 24 - struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector); 24 + struct idxd_irq_entry *ie; 25 + struct irq_data *data; 25 26 27 + ie = idxd_get_ie(idxd, vec_id); 28 + data = irq_get_irq_data(ie->vector); 26 29 pci_msi_mask_irq(data); 27 30 } 28 31 ··· 41 38 42 39 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) 43 40 { 44 - struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector); 41 + struct idxd_irq_entry *ie; 42 + struct irq_data *data; 45 43 44 + ie = idxd_get_ie(idxd, vec_id); 45 + data = irq_get_irq_data(ie->vector); 46 46 pci_msi_unmask_irq(data); 47 47 } 48 48 ··· 1222 1216 goto err; 1223 1217 } 1224 1218 1225 - /* 1226 - * Device has 1 misc interrupt and N interrupts for descriptor completion. To 1227 - * assign WQ to interrupt, we will take the N+1 interrupt since vector 0 is 1228 - * for the misc interrupt. 1229 - */ 1230 - wq->ie = &idxd->irq_entries[wq->id + 1]; 1231 - 1232 1219 rc = idxd_wq_enable(wq); 1233 1220 if (rc < 0) { 1234 1221 dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc); ··· 1272 1273 idxd_wq_drain(wq); 1273 1274 idxd_wq_reset(wq); 1274 1275 1275 - wq->ie = NULL; 1276 1276 wq->client_count = 0; 1277 1277 } 1278 1278
+17 -5
drivers/dma/idxd/idxd.h
··· 70 70 71 71 #define INVALID_INT_HANDLE -1 72 72 struct idxd_irq_entry { 73 - struct idxd_device *idxd; 74 73 int id; 75 74 int vector; 76 75 struct llist_head pending_llist; ··· 80 81 */ 81 82 spinlock_t list_lock; 82 83 int int_handle; 83 - struct idxd_wq *wq; 84 84 ioasid_t pasid; 85 85 }; 86 86 ··· 183 185 struct wait_queue_head err_queue; 184 186 struct idxd_device *idxd; 185 187 int id; 186 - struct idxd_irq_entry *ie; 188 + struct idxd_irq_entry ie; 187 189 enum idxd_wq_type type; 188 190 struct idxd_group *group; 189 191 int client_count; ··· 264 266 int id; 265 267 int major; 266 268 u32 cmd_status; 269 + struct idxd_irq_entry ie; /* misc irq, msix 0 */ 267 270 268 271 struct pci_dev *pdev; 269 272 void __iomem *reg_base; ··· 301 302 302 303 union sw_err_reg sw_err; 303 304 wait_queue_head_t cmd_waitq; 304 - int num_wq_irqs; 305 - struct idxd_irq_entry *irq_entries; 306 305 307 306 struct idxd_dma_dev *idxd_dma; 308 307 struct workqueue_struct *wq; ··· 390 393 } 391 394 392 395 idev->type = type; 396 + } 397 + 398 + static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx) 399 + { 400 + return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie; 401 + } 402 + 403 + static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie) 404 + { 405 + return container_of(ie, struct idxd_wq, ie); 406 + } 407 + 408 + static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie) 409 + { 410 + return container_of(ie, struct idxd_device, ie); 393 411 } 394 412 395 413 extern struct bus_type dsa_bus_type;
+47 -72
drivers/dma/idxd/init.c
··· 72 72 { 73 73 struct pci_dev *pdev = idxd->pdev; 74 74 struct device *dev = &pdev->dev; 75 - struct idxd_irq_entry *irq_entry; 75 + struct idxd_irq_entry *ie; 76 76 int i, msixcnt; 77 77 int rc = 0; 78 78 ··· 90 90 } 91 91 dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); 92 92 93 - /* 94 - * We implement 1 completion list per MSI-X entry except for 95 - * entry 0, which is for errors and others. 96 - */ 97 - idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry), 98 - GFP_KERNEL, dev_to_node(dev)); 99 - if (!idxd->irq_entries) { 100 - rc = -ENOMEM; 101 - goto err_irq_entries; 102 - } 103 - 104 - for (i = 0; i < msixcnt; i++) { 105 - idxd->irq_entries[i].id = i; 106 - idxd->irq_entries[i].idxd = idxd; 107 - /* 108 - * Association of WQ should be assigned starting with irq_entry 1. 109 - * irq_entry 0 is for misc interrupts and has no wq association 110 - */ 111 - if (i > 0) 112 - idxd->irq_entries[i].wq = idxd->wqs[i - 1]; 113 - idxd->irq_entries[i].vector = pci_irq_vector(pdev, i); 114 - idxd->irq_entries[i].int_handle = INVALID_INT_HANDLE; 115 - if (device_pasid_enabled(idxd) && i > 0) 116 - idxd->irq_entries[i].pasid = idxd->pasid; 117 - else 118 - idxd->irq_entries[i].pasid = INVALID_IOASID; 119 - spin_lock_init(&idxd->irq_entries[i].list_lock); 120 - } 121 - 122 93 idxd_msix_perm_setup(idxd); 123 94 124 - irq_entry = &idxd->irq_entries[0]; 125 - rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread, 126 - 0, "idxd-misc", irq_entry); 95 + ie = idxd_get_ie(idxd, 0); 96 + ie->vector = pci_irq_vector(pdev, 0); 97 + rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie); 127 98 if (rc < 0) { 128 99 dev_err(dev, "Failed to allocate misc interrupt.\n"); 129 100 goto err_misc_irq; 130 101 } 131 102 132 - dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector); 103 + dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", ie->vector); 133 104 134 - /* first MSI-X entry is 
not for wq interrupts */ 135 - idxd->num_wq_irqs = msixcnt - 1; 105 + for (i = 0; i < idxd->max_wqs; i++) { 106 + int msix_idx = i + 1; 136 107 137 - for (i = 1; i < msixcnt; i++) { 138 - irq_entry = &idxd->irq_entries[i]; 108 + ie = idxd_get_ie(idxd, msix_idx); 139 109 140 - init_llist_head(&idxd->irq_entries[i].pending_llist); 141 - INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); 142 - rc = request_threaded_irq(irq_entry->vector, NULL, 143 - idxd_wq_thread, 0, "idxd-portal", irq_entry); 110 + /* MSIX vector 0 special, wq irq entry starts at 1 */ 111 + ie->id = msix_idx; 112 + ie->vector = pci_irq_vector(pdev, msix_idx); 113 + ie->int_handle = INVALID_INT_HANDLE; 114 + if (device_pasid_enabled(idxd) && i > 0) 115 + ie->pasid = idxd->pasid; 116 + else 117 + ie->pasid = INVALID_IOASID; 118 + spin_lock_init(&ie->list_lock); 119 + init_llist_head(&ie->pending_llist); 120 + INIT_LIST_HEAD(&ie->work_list); 121 + 122 + rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie); 144 123 if (rc < 0) { 145 - dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector); 124 + dev_err(dev, "Failed to allocate irq %d.\n", ie->vector); 146 125 goto err_wq_irqs; 147 126 } 148 127 149 - dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector); 128 + dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, ie->vector); 150 129 if (idxd->request_int_handles) { 151 - rc = idxd_device_request_int_handle(idxd, i, &irq_entry->int_handle, 130 + rc = idxd_device_request_int_handle(idxd, i, &ie->int_handle, 152 131 IDXD_IRQ_MSIX); 153 132 if (rc < 0) { 154 - free_irq(irq_entry->vector, irq_entry); 133 + free_irq(ie->vector, ie); 155 134 goto err_wq_irqs; 156 135 } 157 - dev_dbg(dev, "int handle requested: %u\n", irq_entry->int_handle); 136 + dev_dbg(dev, "int handle requested: %u\n", ie->int_handle); 137 + } else { 138 + ie->int_handle = msix_idx; 158 139 } 140 + 159 141 } 160 142 161 143 idxd_unmask_error_interrupts(idxd); ··· 145 163 146 
164 err_wq_irqs: 147 165 while (--i >= 0) { 148 - irq_entry = &idxd->irq_entries[i]; 149 - free_irq(irq_entry->vector, irq_entry); 150 - if (irq_entry->int_handle != INVALID_INT_HANDLE) { 151 - idxd_device_release_int_handle(idxd, irq_entry->int_handle, 152 - IDXD_IRQ_MSIX); 153 - irq_entry->int_handle = INVALID_INT_HANDLE; 154 - irq_entry->pasid = INVALID_IOASID; 166 + ie = &idxd->wqs[i]->ie; 167 + free_irq(ie->vector, ie); 168 + if (ie->int_handle != INVALID_INT_HANDLE) { 169 + idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX); 170 + ie->int_handle = INVALID_INT_HANDLE; 171 + ie->pasid = INVALID_IOASID; 155 172 } 156 - irq_entry->vector = -1; 157 - irq_entry->wq = NULL; 158 - irq_entry->idxd = NULL; 173 + ie->vector = -1; 159 174 } 160 175 err_misc_irq: 161 176 /* Disable error interrupt generation */ 162 177 idxd_mask_error_interrupts(idxd); 163 178 idxd_msix_perm_clear(idxd); 164 - err_irq_entries: 165 179 pci_free_irq_vectors(pdev); 166 180 dev_err(dev, "No usable interrupts\n"); 167 181 return rc; ··· 166 188 static void idxd_cleanup_interrupts(struct idxd_device *idxd) 167 189 { 168 190 struct pci_dev *pdev = idxd->pdev; 169 - struct idxd_irq_entry *irq_entry; 191 + struct idxd_irq_entry *ie; 170 192 int i; 171 193 172 194 for (i = 0; i < idxd->irq_cnt; i++) { 173 - irq_entry = &idxd->irq_entries[i]; 174 - if (irq_entry->int_handle != INVALID_INT_HANDLE) { 175 - idxd_device_release_int_handle(idxd, irq_entry->int_handle, 176 - IDXD_IRQ_MSIX); 177 - irq_entry->int_handle = INVALID_INT_HANDLE; 178 - irq_entry->pasid = INVALID_IOASID; 195 + ie = idxd_get_ie(idxd, i); 196 + if (ie->int_handle != INVALID_INT_HANDLE) { 197 + idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX); 198 + ie->int_handle = INVALID_INT_HANDLE; 199 + ie->pasid = INVALID_IOASID; 179 200 } 180 - irq_entry->vector = -1; 181 - irq_entry->wq = NULL; 182 - irq_entry->idxd = NULL; 183 - free_irq(irq_entry->vector, irq_entry); 201 + free_irq(ie->vector, ie); 202 + 
ie->vector = -1; 184 203 } 185 204 186 205 idxd_mask_error_interrupts(idxd); ··· 730 755 int i, rc; 731 756 732 757 for (i = 1; i < idxd->irq_cnt; i++) { 733 - struct idxd_irq_entry *ie = &idxd->irq_entries[i]; 758 + struct idxd_irq_entry *ie = idxd_get_ie(idxd, i); 734 759 735 760 if (ie->int_handle != INVALID_INT_HANDLE) { 736 761 rc = idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX); ··· 758 783 idxd_mask_error_interrupts(idxd); 759 784 760 785 for (i = 0; i < msixcnt; i++) { 761 - irq_entry = &idxd->irq_entries[i]; 786 + irq_entry = idxd_get_ie(idxd, i); 762 787 synchronize_irq(irq_entry->vector); 763 788 if (i == 0) 764 789 continue; ··· 790 815 idxd_disable_system_pasid(idxd); 791 816 792 817 for (i = 0; i < msixcnt; i++) { 793 - irq_entry = &idxd->irq_entries[i]; 818 + irq_entry = idxd_get_ie(idxd, i); 794 819 free_irq(irq_entry->vector, irq_entry); 795 820 } 796 821 idxd_msix_perm_clear(idxd);
+5 -5
drivers/dma/idxd/irq.c
··· 73 73 */ 74 74 static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie) 75 75 { 76 - struct idxd_wq *wq = ie->wq; 77 - struct idxd_device *idxd = ie->idxd; 76 + struct idxd_wq *wq = ie_to_wq(ie); 77 + struct idxd_device *idxd = wq->idxd; 78 78 struct device *dev = &idxd->pdev->dev; 79 79 struct dsa_hw_desc desc = {}; 80 80 void __iomem *portal; ··· 155 155 * at the end to make sure all invalid int handle descriptors are processed. 156 156 */ 157 157 for (i = 1; i < idxd->irq_cnt; i++) { 158 - struct idxd_irq_entry *ie = &idxd->irq_entries[i]; 159 - struct idxd_wq *wq = ie->wq; 158 + struct idxd_irq_entry *ie = idxd_get_ie(idxd, i); 159 + struct idxd_wq *wq = ie_to_wq(ie); 160 160 161 161 rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX); 162 162 if (rc < 0) { ··· 338 338 irqreturn_t idxd_misc_thread(int vec, void *data) 339 339 { 340 340 struct idxd_irq_entry *irq_entry = data; 341 - struct idxd_device *idxd = irq_entry->idxd; 341 + struct idxd_device *idxd = ie_to_idxd(irq_entry); 342 342 int rc; 343 343 u32 cause; 344 344
+2 -6
drivers/dma/idxd/submit.c
··· 193 193 * that we designated the descriptor to. 194 194 */ 195 195 if (desc_flags & IDXD_OP_FLAG_RCI) { 196 - ie = wq->ie; 197 - if (ie->int_handle == INVALID_INT_HANDLE) 198 - desc->hw->int_handle = ie->id; 199 - else 200 - desc->hw->int_handle = ie->int_handle; 201 - 196 + ie = &wq->ie; 197 + desc->hw->int_handle = ie->int_handle; 202 198 llist_add(&desc->llnode, &ie->pending_llist); 203 199 } 204 200
-1
drivers/dma/idxd/sysfs.c
··· 1304 1304 kfree(idxd->groups); 1305 1305 kfree(idxd->wqs); 1306 1306 kfree(idxd->engines); 1307 - kfree(idxd->irq_entries); 1308 1307 ida_free(&idxd_ida, idxd->id); 1309 1308 kfree(idxd); 1310 1309 }