Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: idxd: fix dma device lifetime

The devm managed lifetime is incompatible with 'struct device' objects that
reside in the idxd context. This is part of a series that cleans up the idxd
driver's 'struct device' lifetime. Remove the embedding of dma_device and
dma_chan in idxd since it is not the only interface that idxd will use. The
freeing of the dma_device will be managed by the ->release() function.

Reported-by: Jason Gunthorpe <jgg@nvidia.com>
Fixes: bfe1d56091c1 ("dmaengine: idxd: Init and probe for Intel data accelerators")
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Link: https://lore.kernel.org/r/161852983001.2203940.14817017492384561719.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Dave Jiang and committed by
Vinod Koul
39786285 63606522

+79 -18
-2
drivers/dma/idxd/device.c
··· 186 186 desc->id = i; 187 187 desc->wq = wq; 188 188 desc->cpu = -1; 189 - dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan); 190 - desc->txd.tx_submit = idxd_dma_tx_submit; 191 189 } 192 190 193 191 return 0;
+64 -13
drivers/dma/idxd/dma.c
··· 14 14 15 15 static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) 16 16 { 17 - return container_of(c, struct idxd_wq, dma_chan); 17 + struct idxd_dma_chan *idxd_chan; 18 + 19 + idxd_chan = container_of(c, struct idxd_dma_chan, chan); 20 + return idxd_chan->wq; 18 21 } 19 22 20 23 void idxd_dma_complete_txd(struct idxd_desc *desc, ··· 138 135 { 139 136 } 140 137 141 - dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) 138 + static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) 142 139 { 143 140 struct dma_chan *c = tx->chan; 144 141 struct idxd_wq *wq = to_idxd_wq(c); ··· 159 156 160 157 static void idxd_dma_release(struct dma_device *device) 161 158 { 159 + struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma); 160 + 161 + kfree(idxd_dma); 162 162 } 163 163 164 164 int idxd_register_dma_device(struct idxd_device *idxd) 165 165 { 166 - struct dma_device *dma = &idxd->dma_dev; 166 + struct idxd_dma_dev *idxd_dma; 167 + struct dma_device *dma; 168 + struct device *dev = &idxd->pdev->dev; 169 + int rc; 167 170 171 + idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev)); 172 + if (!idxd_dma) 173 + return -ENOMEM; 174 + 175 + dma = &idxd_dma->dma; 168 176 INIT_LIST_HEAD(&dma->channels); 169 - dma->dev = &idxd->pdev->dev; 177 + dma->dev = dev; 170 178 171 179 dma_cap_set(DMA_PRIVATE, dma->cap_mask); 172 180 dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); ··· 193 179 dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources; 194 180 dma->device_free_chan_resources = idxd_dma_free_chan_resources; 195 181 196 - return dma_async_device_register(&idxd->dma_dev); 182 + rc = dma_async_device_register(dma); 183 + if (rc < 0) { 184 + kfree(idxd_dma); 185 + return rc; 186 + } 187 + 188 + idxd_dma->idxd = idxd; 189 + /* 190 + * This pointer is protected by the refs taken by the dma_chan. It will remain valid 191 + * as long as there are outstanding channels. 
192 + */ 193 + idxd->idxd_dma = idxd_dma; 194 + return 0; 197 195 } 198 196 199 197 void idxd_unregister_dma_device(struct idxd_device *idxd) 200 198 { 201 - dma_async_device_unregister(&idxd->dma_dev); 199 + dma_async_device_unregister(&idxd->idxd_dma->dma); 202 200 } 203 201 204 202 int idxd_register_dma_channel(struct idxd_wq *wq) 205 203 { 206 204 struct idxd_device *idxd = wq->idxd; 207 - struct dma_device *dma = &idxd->dma_dev; 208 - struct dma_chan *chan = &wq->dma_chan; 209 - int rc; 205 + struct dma_device *dma = &idxd->idxd_dma->dma; 206 + struct device *dev = &idxd->pdev->dev; 207 + struct idxd_dma_chan *idxd_chan; 208 + struct dma_chan *chan; 209 + int rc, i; 210 210 211 - memset(&wq->dma_chan, 0, sizeof(struct dma_chan)); 211 + idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev)); 212 + if (!idxd_chan) 213 + return -ENOMEM; 214 + 215 + chan = &idxd_chan->chan; 212 216 chan->device = dma; 213 217 list_add_tail(&chan->device_node, &dma->channels); 218 + 219 + for (i = 0; i < wq->num_descs; i++) { 220 + struct idxd_desc *desc = wq->descs[i]; 221 + 222 + dma_async_tx_descriptor_init(&desc->txd, chan); 223 + desc->txd.tx_submit = idxd_dma_tx_submit; 224 + } 225 + 214 226 rc = dma_async_device_channel_register(dma, chan); 215 - if (rc < 0) 227 + if (rc < 0) { 228 + kfree(idxd_chan); 216 229 return rc; 230 + } 231 + 232 + wq->idxd_chan = idxd_chan; 233 + idxd_chan->wq = wq; 234 + get_device(&wq->conf_dev); 217 235 218 236 return 0; 219 237 } 220 238 221 239 void idxd_unregister_dma_channel(struct idxd_wq *wq) 222 240 { 223 - struct dma_chan *chan = &wq->dma_chan; 241 + struct idxd_dma_chan *idxd_chan = wq->idxd_chan; 242 + struct dma_chan *chan = &idxd_chan->chan; 243 + struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma; 224 244 225 - dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan); 245 + dma_async_device_channel_unregister(&idxd_dma->dma, chan); 226 246 list_del(&chan->device_node); 247 + kfree(wq->idxd_chan); 248 + 
wq->idxd_chan = NULL; 249 + put_device(&wq->conf_dev); 227 250 }
+15 -3
drivers/dma/idxd/idxd.h
··· 14 14 15 15 extern struct kmem_cache *idxd_desc_pool; 16 16 17 + struct idxd_device; 18 + struct idxd_wq; 19 + 17 20 #define IDXD_REG_TIMEOUT 50 18 21 #define IDXD_DRAIN_TIMEOUT 5000 19 22 ··· 99 96 IDXD_COMPLETE_DEV_FAIL, 100 97 }; 101 98 99 + struct idxd_dma_chan { 100 + struct dma_chan chan; 101 + struct idxd_wq *wq; 102 + }; 103 + 102 104 struct idxd_wq { 103 105 void __iomem *portal; 104 106 struct device conf_dev; ··· 133 125 int compls_size; 134 126 struct idxd_desc **descs; 135 127 struct sbitmap_queue sbq; 136 - struct dma_chan dma_chan; 128 + struct idxd_dma_chan *idxd_chan; 137 129 char name[WQ_NAME_SIZE + 1]; 138 130 u64 max_xfer_bytes; 139 131 u32 max_batch_size; ··· 168 160 IDXD_FLAG_CONFIGURABLE = 0, 169 161 IDXD_FLAG_CMD_RUNNING, 170 162 IDXD_FLAG_PASID_ENABLED, 163 + }; 164 + 165 + struct idxd_dma_dev { 166 + struct idxd_device *idxd; 167 + struct dma_device dma; 171 168 }; 172 169 173 170 struct idxd_device { ··· 223 210 int num_wq_irqs; 224 211 struct idxd_irq_entry *irq_entries; 225 212 226 - struct dma_device dma_dev; 213 + struct idxd_dma_dev *idxd_dma; 227 214 struct workqueue_struct *wq; 228 215 struct work_struct work; 229 216 }; ··· 376 363 void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); 377 364 void idxd_dma_complete_txd(struct idxd_desc *desc, 378 365 enum idxd_complete_type comp_type); 379 - dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx); 380 366 381 367 /* cdev */ 382 368 int idxd_cdev_register(void);