// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

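/*
 * Driver-model match hook for the dsa bus. A device may bind only once
 * it is in a configurable state: the idxd device itself must have
 * reached IDXD_DEV_CONF_READY, and a wq must additionally be disabled.
 * Returning 0 quietly defers binding until sysfs configuration is done.
 */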
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

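/*
 * Probe doubles as the "enable" operation: binding a device or wq to
 * the dsa driver writes the sysfs-staged configuration to hardware and
 * enables it. From user space this is typically triggered through the
 * standard driver-model bind interface, e.g. (illustrative paths):
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */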
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

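/*
 * Tear down a wq in roughly the reverse order of probe: drop the
 * dmaengine channel or cdev, unmap the portal, drain outstanding
 * descriptors, disable the wq in hardware, then free its resources.
 */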
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

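/*
 * Unbind reverses probe. For the idxd device this first forces any
 * still-enabled wqs off by releasing their drivers, since a wq must
 * not outlive an enabled parent device.
 */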
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* only unwind the drivers that registered successfully */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

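/*
 * Writing -1 detaches the engine from its group; writing a valid group
 * index attaches it, e.g. (illustrative path):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 */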
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

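/*
 * Recompute the free (shared) token pool: whatever is not reserved by
 * a group stays in the common pool. For example, with max_tokens = 96
 * and two groups reserving 16 tokens each, nr_tokens becomes 64.
 */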
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* replace the trailing space with a newline; guard the empty case */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* replace the trailing space with a newline; guard the empty case */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

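/*
 * Sum the wq sizes currently claimed on the device. This total includes
 * the wq being resized, which is why wq_size_store() below subtracts the
 * wq's old size before comparing against max_wq_size.
 */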
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

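/*
 * A typical staging sequence from user space (paths illustrative):
 *
 *	echo "kernel" > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo "dmaengine" > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *
 * Changing the type clears any previously set name, so set type first.
 */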
static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

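/*
 * Register one conf_dev per engine under the idxd parent. If
 * device_register() fails, the half-initialized device is dropped with
 * put_device() and the engines registered so far are unwound.
 */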
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

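/*
 * Register the device, wq, group and engine conf_devs. Note that the
 * error paths below only report failure; per the inline comments, the
 * earlier registrations are not unwound here and teardown is left to
 * idxd_cleanup_sysfs().
 */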
int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* only unwind the buses that registered successfully */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}