// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	genctrl.halt_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	genctrl.halt_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
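
/*
 * Both helpers above use the same read-modify-write pattern so that
 * untouched GENCTRL fields survive the update. A minimal sketch of the
 * pattern (illustrative only, mirroring the code above):
 *
 *	union genctrl_reg genctrl;
 *
 *	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
 *	genctrl.halt_int_en = 1;	// touch only the field of interest
 *	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
 */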

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}
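
/*
 * A note on the allocators above: both use the *_node() variants keyed to
 * dev_to_node() so that descriptor memory is NUMA-local to the device. The
 * partial-failure path simply calls the matching free_*() helper, which is
 * safe because kcalloc_node() zeroed the pointer array and kfree(NULL) is
 * a no-op. A condensed sketch of the pattern (generic, not driver code):
 *
 *	arr = kcalloc_node(n, sizeof(*arr), GFP_KERNEL, dev_to_node(dev));
 *	...
 *	arr[i] = kzalloc_node(sizeof(**arr), GFP_KERNEL, dev_to_node(dev));
 */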

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * idxd->data->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}
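
/*
 * After idxd_wq_alloc_resources() succeeds, descriptor i is fully wired:
 * desc->hw points at its hardware descriptor, desc->compl_dma at the i-th
 * slot of the single coherent completion buffer, and the sbitmap queue
 * hands out ids 0..num_descs-1. A hedged caller sketch (error handling
 * elided, not a real call site):
 *
 *	if (!idxd_wq_alloc_resources(wq)) {
 *		... submit work via wq->descs[id] ...
 *		idxd_wq_free_resources(wq);
 *	}
 */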

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	set_bit(wq->id, idxd->wq_enable_map);
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	if (reset_config)
		idxd_wq_disable_cleanup(wq);
	clear_bit(wq->id, idxd->wq_enable_map);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}
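
/*
 * The per-WQ commands encode their target as a 16-bit mask plus a mask
 * index: bits 15:0 select WQs within a window of 16 and bits 31:16 pick
 * the window. Worked example for wq->id == 21:
 *
 *	operand = BIT(21 % 16) | ((21 / 16) << 16)
 *		= BIT(5) | (1 << 16)
 *		= 0x10020
 */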

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	idxd_wq_disable_cleanup(wq);
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}
}

static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{
	struct idxd_device *idxd = wq->idxd;
	union wqcfg wqcfg;
	unsigned int offset;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.priv = priv;
	wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
	iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);
}

static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	union wqcfg wqcfg;
	unsigned int offset;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	int rc;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	__idxd_wq_set_pasid_locked(wq, pasid);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	wq->state = IDXD_WQ_DISABLED;
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->threshold = 0;
	wq->priority = 0;
	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
	if (wq->opcap_bmap)
		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);

	wq->size = 0;
	wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
	struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

	complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
	int rc;

	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	reinit_completion(&wq->wq_resurrect);
	return 0;
}

void __idxd_wq_quiesce(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);
	reinit_completion(&wq->wq_resurrect);
	percpu_ref_kill(&wq->wq_active);
	complete_all(&wq->wq_resurrect);
	wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	mutex_unlock(&wq->wq_lock);
}
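
/*
 * Quiescing follows the standard percpu_ref drain pattern: kill the ref
 * so that new users fail to take it, then sleep until
 * idxd_wq_ref_release() completes wq_dead when the last in-flight user
 * drops its reference. Condensed sketch of the pattern (assumes the
 * submit path takes the ref with a tryget variant):
 *
 *	percpu_ref_kill(&wq->wq_active);	// refuse new references
 *	wait_for_completion(&wq->wq_dead);	// last put() signals this
 */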

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);
	return 0;
}
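
/*
 * Polled submission is safe here only because this runs during probe,
 * single-threaded and before the device's interrupts are wired up. Every
 * later command goes through idxd_cmd_exec() below, which sleeps on an
 * interrupt-signalled completion instead of spinning on
 * IDXD_CMDSTS_ACTIVE.
 */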

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 stat;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock(&idxd->cmd_lock);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After the command is submitted, release the lock and go to sleep
	 * until the command completes via interrupt.
	 */
	spin_unlock(&idxd->cmd_lock);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
}
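
/*
 * The command flow above, condensed (illustrative pseudo-steps, not
 * additional driver code):
 *
 *	spin_lock; wait until IDXD_FLAG_CMD_RUNNING is clear
 *	set the flag, point idxd->cmd_done at the on-stack completion
 *	iowrite32(cmd.bits, ...); spin_unlock
 *	wait_for_completion(&done);	// IRQ handler completes cmd_done
 *	re-read CMDSTS, clear the flag, wake_up() the next waiter
 *
 * cmd_lock plus the RUNNING flag serialize the single hardware command
 * slot among all sleeping callers.
 */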

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* Command succeeded, or the device was already enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* Command succeeded, or the device was already disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd_device_clear_state(idxd);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	idxd_device_clear_state(idxd);
	spin_lock(&idxd->dev_lock);
	idxd_unmask_error_interrupts(idxd);
	spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "get int handle, idx %d\n", idx);

	operand = idx & GENMASK(15, 0);
	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

	idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "request int handle failed: %#x\n", status);
		return -ENXIO;
	}

	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

	dev_dbg(dev, "int handle acquired: %u\n", *handle);
	return 0;
}
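
/*
 * On success the handle is returned in the CMDSTS result field above the
 * low 8-bit error code, hence the shift-and-mask above. For a
 * hypothetical status whose result field holds 0x45, *handle decodes to
 * 0x45:
 *
 *	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);
 */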

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;
	union idxd_command_reg cmd;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "release int handle, handle %d\n", handle);

	memset(&cmd, 0, sizeof(cmd));
	operand = handle & GENMASK(15, 0);

	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
	cmd.operand = operand;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "release int handle failed: %#x\n", status);
		return -ENXIO;
	}

	dev_dbg(dev, "int handle released.\n");
	return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_rdbuf_limit = false;
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
		group->rdbufs_reserved = 0;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		group->desc_progress_limit = 0;
		group->batch_progress_limit = 0;
	}
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		mutex_lock(&wq->wq_lock);
		idxd_wq_disable_cleanup(wq);
		idxd_wq_device_reset_cleanup(wq);
		mutex_unlock(&wq->wq_lock);
	}
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
	/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		/*
		 * Clearing wq state is protected by the wq lock, so it
		 * does not need to be protected by the device lock.
		 */
		idxd_device_wqs_clear_state(idxd);

		spin_lock(&idxd->dev_lock);
		idxd_groups_clear_state(idxd);
		idxd_engines_clear_state(idxd);
	} else {
		spin_lock(&idxd->dev_lock);
	}

	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
}

static int idxd_device_evl_setup(struct idxd_device *idxd)
{
	union gencfg_reg gencfg;
	union evlcfg_reg evlcfg;
	union genctrl_reg genctrl;
	struct device *dev = &idxd->pdev->dev;
	void *addr;
	dma_addr_t dma_addr;
	int size;
	struct idxd_evl *evl = idxd->evl;
	unsigned long *bmap;
	int rc;

	if (!evl)
		return 0;

	size = evl_size(idxd);

	bmap = bitmap_zalloc(size, GFP_KERNEL);
	if (!bmap) {
		rc = -ENOMEM;
		goto err_bmap;
	}

	/*
	 * The address needs to be page aligned. dma_alloc_coherent() returns
	 * memory aligned to at least a page, so no manual alignment is
	 * required.
	 */
	addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (!addr) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	memset(addr, 0, size);

	spin_lock(&evl->lock);
	evl->log = addr;
	evl->dma = dma_addr;
	evl->log_size = size;
	evl->bmap = bmap;

	memset(&evlcfg, 0, sizeof(evlcfg));
	evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
	evlcfg.size = evl->size;

	iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	gencfg.evl_en = 1;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	spin_unlock(&evl->lock);
	return 0;

err_alloc:
	bitmap_free(bmap);
err_bmap:
	return rc;
}
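
/*
 * EVLCFG bits[0] carries the event log base address masked with
 * GENMASK(63, 12), i.e. 4KB aligned. That is why the comment above leans
 * on dma_alloc_coherent(): its return is aligned to at least PAGE_SIZE,
 * so on any configuration with PAGE_SIZE >= 4KB the masking discards
 * nothing (an assumption about the platform, not enforced here).
 */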

static void idxd_device_evl_free(struct idxd_device *idxd)
{
	union gencfg_reg gencfg;
	union genctrl_reg genctrl;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl = idxd->evl;

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	if (!gencfg.evl_en)
		return;

	spin_lock(&evl->lock);
	gencfg.evl_en = 0;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
	bitmap_free(evl->bmap);
	evl->log = NULL;
	evl->size = IDXD_EVL_SIZE_MIN;
	spin_unlock(&evl->lock);
}

static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset,
		ioread64(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth rdbuf limit */
	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.rdbuf_limit = idxd->rdbuf_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i, n;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memsetting the entire shadow copy of WQCFG, copy from
	 * the hardware after wq reset. This copies back the sticky values
	 * that are present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
	}

	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
		wq->size = WQ_DEFAULT_QUEUE_DEPTH;

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	/*
	 * The WQ priv bit is set depending on the WQ type. priv = 1 if the
	 * WQ type is kernel, to indicate privileged access. This setting
	 * only matters for a dedicated WQ. According to the DSA spec:
	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
	 * Privileged Mode Enable field of the PCI Express PASID capability
	 * is 0, this field must be 0.
	 *
	 * If a dedicated kernel WQ cannot support the PASID capability,
	 * the configuration is rejected.
	 */
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
	    !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	if (idxd->hw.wq_cap.wq_prs_support)
		wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));

	/* bytes 32-63 */
	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
		memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
		for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
			int pos = n % BITS_PER_LONG_LONG;
			int idx = n / BITS_PER_LONG_LONG;

			wq->wqcfg->op_config[idx] |= BIT(pos);
		}
	}

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
		group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
		group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
		group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;

		if (wq_shared(wq) && !wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}

	return 0;
}
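
/*
 * GRPWQCFG is a 256-bit WQ bitmap split across four 64-bit words, so WQ
 * id n lands in word n / 64, bit n % 64. Worked example for wq->id == 70:
 *
 *	group->grpcfg.wqs[70 / 64] |= BIT(70 % 64);	// wqs[1] |= BIT(6)
 */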

int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}
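
/*
 * Note the ordering above: shadow state is computed first (WQ and engine
 * bitmaps, group flags) and only then flushed to hardware, all under
 * dev_lock per the lockdep assertion. A call-site sketch matching the
 * callers elsewhere in this file:
 *
 *	spin_lock(&idxd->dev_lock);
 *	rc = idxd_device_config(idxd);
 *	spin_unlock(&idxd->dev_lock);
 */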

static int idxd_wq_load_config(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int wqcfg_offset;
	int i;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;

	/* The driver does not support shared WQ mode in read-only config yet */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

	return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, j, grpcfg_offset;

	/*
	 * Load the WQS bit fields, iterating through all 256 bits,
	 * 64 bits at a time.
	 */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		struct idxd_wq *wq;

		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits and check for wq set */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			/* No need to check beyond max wqs */
			if (id >= idxd->max_wqs)
				break;

			/* Set group assignment for wq if wq bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through all 64 bits to check engines set */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;

		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

int idxd_device_load_config(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i, rc;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->rdbuf_limit = reg.rdbuf_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;
	LIST_HEAD(flist);
	enum idxd_complete_type ctype;

	spin_lock(&ie->list_lock);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(desc, itr, head, llnode)
			list_add_tail(&desc->list, &ie->work_list);
	}

	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
		list_move_tail(&desc->list, &flist);
	spin_unlock(&ie->list_lock);

	list_for_each_entry_safe(desc, itr, &flist, list) {
		struct dma_async_tx_descriptor *tx;

		list_del(&desc->list);
		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
		/*
		 * The wq is being disabled. Any remaining descriptors are
		 * likely to be stuck and can be dropped. The callback could
		 * point to code that is no longer accessible, for example
		 * if the dmatest module has been unloaded.
		 */
		tx = &desc->txd;
		tx->callback = NULL;
		tx->callback_result = NULL;
		idxd_dma_complete_txd(desc, ctype, true);
	}
}

static void idxd_device_set_perm_entry(struct idxd_device *idxd,
				       struct idxd_irq_entry *ie)
{
	union msix_perm mperm;

	if (ie->pasid == IOMMU_PASID_INVALID)
		return;

	mperm.bits = 0;
	mperm.pasid = ie->pasid;
	mperm.pasid_en = 1;
	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
					 struct idxd_irq_entry *ie)
{
	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = &wq->ie;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_irq(ie->vector, ie);
	idxd_flush_pending_descs(ie);
	if (idxd->request_int_handles)
		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
	idxd_device_clear_perm_entry(idxd, ie);
	ie->vector = -1;
	ie->int_handle = INVALID_INT_HANDLE;
	ie->pasid = IOMMU_PASID_INVALID;
}

int idxd_wq_request_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int rc;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	ie = &wq->ie;
	ie->vector = pci_irq_vector(pdev, ie->id);
	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
	idxd_device_set_perm_entry(idxd, ie);

	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
		goto err_irq;
	}

	if (idxd->request_int_handles) {
		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
						    IDXD_IRQ_MSIX);
		if (rc < 0)
			goto err_int_handle;
	} else {
		ie->int_handle = ie->id;
	}

	return 0;

err_int_handle:
	ie->int_handle = INVALID_INT_HANDLE;
	free_irq(ie->vector, ie);
err_irq:
	idxd_device_clear_perm_entry(idxd, ie);
	ie->pasid = IOMMU_PASID_INVALID;
	return rc;
}

int drv_enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc = -ENXIO;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
		goto err;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		dev_dbg(dev, "wq %d already enabled.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
		rc = -EBUSY;
		goto err;
	}

	if (!wq->group) {
		dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
		goto err;
	}

	if (strlen(wq->name) == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
		dev_dbg(dev, "wq %d name not set.\n", wq->id);
		goto err;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			dev_dbg(dev, "PASID not enabled and shared wq.\n");
			goto err;
		}
		/*
		 * A shared wq with the threshold set to 0 means the user did
		 * not set the threshold, or transitioned from a dedicated wq
		 * without setting one. A value of 0 would effectively disable
		 * the shared wq, and the driver does not allow a value of 0
		 * to be set for the threshold via sysfs.
		 */
		if (wq->threshold == 0) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
			dev_dbg(dev, "Shared wq and threshold 0.\n");
			goto err;
		}
	}

	/*
	 * If the WQ is configurable for the pasid and priv bits:
	 * for a kernel wq, the driver sets up the pasid, pasid_en, and priv
	 * bits; for a non-kernel wq, it only sets the pasid_en bit for a
	 * shared wq. A dedicated wq that is not of 'kernel' type will
	 * configure pasid and pasid_en later on, so there is no need to
	 * set them up here.
	 */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		int priv = 0;

		if (wq_pasid_enabled(wq)) {
			if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
				u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;

				__idxd_wq_set_pasid_locked(wq, pasid);
			}
		}

		if (is_idxd_wq_kernel(wq))
			priv = 1;
		__idxd_wq_set_priv_locked(wq, priv);
	}

	rc = 0;
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0) {
		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
		goto err_map_portal;
	}

	wq->client_count = 0;

	rc = idxd_wq_request_irq(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
		goto err_irq;
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
		dev_dbg(dev, "WQ resource alloc failed\n");
		goto err_res_alloc;
	}

	rc = idxd_wq_init_percpu_ref(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
		dev_dbg(dev, "percpu_ref setup failed\n");
		goto err_ref;
	}

	return 0;

err_ref:
	idxd_wq_free_resources(wq);
err_res_alloc:
	idxd_wq_free_irq(wq);
err_irq:
	idxd_wq_unmap_portal(wq);
err_map_portal:
	if (idxd_wq_disable(wq, false))
		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
	return rc;
}
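
/*
 * The error labels above unwind in exact reverse order of acquisition,
 * so each label releases only what was set up before the failing step:
 *
 *	err_ref:	idxd_wq_free_resources(wq);
 *	err_res_alloc:	idxd_wq_free_irq(wq);
 *	err_irq:	idxd_wq_unmap_portal(wq);
 *	err_map_portal:	idxd_wq_disable(wq, false);
 *
 * drv_disable_wq() below tears down the same pieces for a fully enabled wq.
 */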

void drv_disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);
	idxd_wq_drain(wq);
	idxd_wq_free_irq(wq);
	idxd_wq_reset(wq);
	idxd_wq_free_resources(wq);
	percpu_ref_exit(&wq->wq_active);
	wq->type = IDXD_WQT_NONE;
	wq->client_count = 0;
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int rc = 0;

	/*
	 * The device should be in the disabled state for the idxd_drv to
	 * load. If it is in the enabled state, the device was altered
	 * outside of the driver's control. If it is halted, we don't want
	 * to proceed.
	 */
	if (idxd->state != IDXD_DEV_DISABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
		return -ENXIO;
	}

	/* Device configuration */
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0)
		return -ENXIO;

	rc = idxd_device_evl_setup(idxd);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
		return rc;
	}

	/* Start device */
	rc = idxd_device_enable(idxd);
	if (rc < 0) {
		idxd_device_evl_free(idxd);
		return rc;
	}

	/* Setup DMA device without channels */
	rc = idxd_register_dma_device(idxd);
	if (rc < 0) {
		idxd_device_disable(idxd);
		idxd_device_evl_free(idxd);
		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
		return rc;
	}

	idxd->cmd_status = 0;
	return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];
		struct device *wq_dev = wq_confdev(wq);

		if (wq->state == IDXD_WQ_DISABLED)
			continue;
		dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
		device_release_driver(wq_dev);
	}

	idxd_unregister_dma_device(idxd);
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
	idxd_device_evl_free(idxd);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_DSA,
	IDXD_DEV_IAX,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
	.type = dev_types,
	.probe = idxd_device_drv_probe,
	.remove = idxd_device_drv_remove,
	.name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);