Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

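/*
 * Bus callbacks for the dsa bus: match succeeds only for a device in a
 * bindable state (idxd device configured and ready, wq disabled); the
 * actual configuration and enabling happens in probe below.
 */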
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

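/*
 * Probe doubles as the "enable" operation. For an idxd device: write
 * the configuration, enable the device, and register it with dmaengine.
 * For a wq: validate its configuration, allocate resources, enable it,
 * map its portal, then expose it as a dmaengine channel or a char
 * device depending on the wq type.
 */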
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			/* dev_lock was already released above */
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling wq while device is not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/*
			 * dev_lock was released above; disable the wq and
			 * propagate the original mapping error.
			 */
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

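/*
 * Common wq teardown: unregister the dmaengine channel or cdev, unmap
 * the portal, disable the wq in hardware, and free its resources.
 */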
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

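/*
 * Remove is the "disable" operation: a wq is torn down individually,
 * while removing the device first releases any wqs still active and
 * then disables the device itself.
 */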
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d while disabling %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

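/*
 * Illustrative usage (not part of this file; device names assumed):
 * once this bus and the "dsa" driver below are registered, configured
 * devices and wqs are enabled/disabled from userspace through the
 * generic sysfs bind/unbind interface, e.g.:
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/unbind
 */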
static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* only unwind the drivers that registered successfully */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

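/*
 * Example (illustrative; sysfs paths assumed): attach engine 0 of
 * device dsa0 to group 0, or detach it again with -1:
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *	echo -1 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 */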
static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

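/*
 * Tokens are the device's shared read buffers. A group may reserve
 * tokens for its exclusive use; idxd_set_free_tokens() recomputes how
 * many tokens remain available to all groups after such reservations.
 */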
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

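/*
 * Example (illustrative; paths assumed): while the device is disabled,
 * reserve eight tokens for group 0 of dsa0 and cap its consumption:
 *
 *	echo 8 > /sys/bus/dsa/devices/dsa0/group0.0/tokens_reserved
 *	echo 8 > /sys/bus/dsa/devices/dsa0/group0.0/tokens_allowed
 */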
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* replace the trailing space with a newline; group may be empty */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* replace the trailing space with a newline; group may be empty */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

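/*
 * Note: only "dedicated" mode is accepted above. Shared work queues
 * (which would need PASID/ENQCMD support) are rejected with -EINVAL.
 */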
static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

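/*
 * Illustrative wq bring-up from userspace with the attributes above
 * (device and wq names assumed):
 *
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo app0 > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */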
static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

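/*
 * Sub-device registration: engines, groups, and wqs are children of
 * the idxd conf_dev on the dsa bus. device_register() owns the embedded
 * struct device from this point on, so a registration failure is rolled
 * back with put_device() and any siblings registered earlier in the
 * loop are unregistered.
 */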
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* only unwind the bus types that registered successfully */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}