// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
13 static char *idxd_wq_type_names
[] = {
14 [IDXD_WQT_NONE
] = "none",
15 [IDXD_WQT_KERNEL
] = "kernel",
16 [IDXD_WQT_USER
] = "user",
19 static int idxd_config_bus_match(struct device
*dev
,
20 struct device_driver
*drv
)
24 if (is_idxd_dev(dev
)) {
25 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
27 if (idxd
->state
!= IDXD_DEV_CONF_READY
)
30 } else if (is_idxd_wq_dev(dev
)) {
31 struct idxd_wq
*wq
= confdev_to_wq(dev
);
32 struct idxd_device
*idxd
= wq
->idxd
;
34 if (idxd
->state
< IDXD_DEV_CONF_READY
)
37 if (wq
->state
!= IDXD_WQ_DISABLED
) {
38 dev_dbg(dev
, "%s not disabled\n", dev_name(dev
));
45 dev_dbg(dev
, "%s matched\n", dev_name(dev
));
50 static int enable_wq(struct idxd_wq
*wq
)
52 struct idxd_device
*idxd
= wq
->idxd
;
53 struct device
*dev
= &idxd
->pdev
->dev
;
57 mutex_lock(&wq
->wq_lock
);
59 if (idxd
->state
!= IDXD_DEV_ENABLED
) {
60 mutex_unlock(&wq
->wq_lock
);
61 dev_warn(dev
, "Enabling while device not enabled.\n");
65 if (wq
->state
!= IDXD_WQ_DISABLED
) {
66 mutex_unlock(&wq
->wq_lock
);
67 dev_warn(dev
, "WQ %d already enabled.\n", wq
->id
);
72 mutex_unlock(&wq
->wq_lock
);
73 dev_warn(dev
, "WQ not attached to group.\n");
77 if (strlen(wq
->name
) == 0) {
78 mutex_unlock(&wq
->wq_lock
);
79 dev_warn(dev
, "WQ name not set.\n");
83 /* Shared WQ checks */
85 if (!device_swq_supported(idxd
)) {
86 dev_warn(dev
, "PASID not enabled and shared WQ.\n");
87 mutex_unlock(&wq
->wq_lock
);
91 * Shared wq with the threshold set to 0 means the user
92 * did not set the threshold or transitioned from a
93 * dedicated wq but did not set threshold. A value
94 * of 0 would effectively disable the shared wq. The
95 * driver does not allow a value of 0 to be set for
96 * threshold via sysfs.
98 if (wq
->threshold
== 0) {
99 dev_warn(dev
, "Shared WQ and threshold 0.\n");
100 mutex_unlock(&wq
->wq_lock
);
105 rc
= idxd_wq_alloc_resources(wq
);
107 mutex_unlock(&wq
->wq_lock
);
108 dev_warn(dev
, "WQ resource alloc failed\n");
112 spin_lock_irqsave(&idxd
->dev_lock
, flags
);
113 if (test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
114 rc
= idxd_device_config(idxd
);
115 spin_unlock_irqrestore(&idxd
->dev_lock
, flags
);
117 mutex_unlock(&wq
->wq_lock
);
118 dev_warn(dev
, "Writing WQ %d config failed: %d\n", wq
->id
, rc
);
122 rc
= idxd_wq_enable(wq
);
124 mutex_unlock(&wq
->wq_lock
);
125 dev_warn(dev
, "WQ %d enabling failed: %d\n", wq
->id
, rc
);
129 rc
= idxd_wq_map_portal(wq
);
131 dev_warn(dev
, "wq portal mapping failed: %d\n", rc
);
132 rc
= idxd_wq_disable(wq
, false);
134 dev_warn(dev
, "IDXD wq disable failed\n");
135 mutex_unlock(&wq
->wq_lock
);
139 wq
->client_count
= 0;
141 if (wq
->type
== IDXD_WQT_KERNEL
) {
142 rc
= idxd_wq_init_percpu_ref(wq
);
144 dev_dbg(dev
, "percpu_ref setup failed\n");
145 mutex_unlock(&wq
->wq_lock
);
150 if (is_idxd_wq_dmaengine(wq
)) {
151 rc
= idxd_register_dma_channel(wq
);
153 dev_dbg(dev
, "DMA channel register failed\n");
154 mutex_unlock(&wq
->wq_lock
);
157 } else if (is_idxd_wq_cdev(wq
)) {
158 rc
= idxd_wq_add_cdev(wq
);
160 dev_dbg(dev
, "Cdev creation failed\n");
161 mutex_unlock(&wq
->wq_lock
);
166 mutex_unlock(&wq
->wq_lock
);
167 dev_info(dev
, "wq %s enabled\n", dev_name(wq_confdev(wq
)));
172 static int idxd_config_bus_probe(struct device
*dev
)
177 dev_dbg(dev
, "%s called\n", __func__
);
179 if (is_idxd_dev(dev
)) {
180 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
182 if (idxd
->state
!= IDXD_DEV_CONF_READY
) {
183 dev_warn(dev
, "Device not ready for config\n");
187 if (!try_module_get(THIS_MODULE
))
190 /* Perform IDXD configuration and enabling */
191 spin_lock_irqsave(&idxd
->dev_lock
, flags
);
192 if (test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
193 rc
= idxd_device_config(idxd
);
194 spin_unlock_irqrestore(&idxd
->dev_lock
, flags
);
196 module_put(THIS_MODULE
);
197 dev_warn(dev
, "Device config failed: %d\n", rc
);
202 rc
= idxd_device_enable(idxd
);
204 module_put(THIS_MODULE
);
205 dev_warn(dev
, "Device enable failed: %d\n", rc
);
209 dev_info(dev
, "Device %s enabled\n", dev_name(dev
));
211 rc
= idxd_register_dma_device(idxd
);
213 module_put(THIS_MODULE
);
214 dev_dbg(dev
, "Failed to register dmaengine device\n");
218 } else if (is_idxd_wq_dev(dev
)) {
219 struct idxd_wq
*wq
= confdev_to_wq(dev
);
221 return enable_wq(wq
);
227 static void disable_wq(struct idxd_wq
*wq
)
229 struct idxd_device
*idxd
= wq
->idxd
;
230 struct device
*dev
= &idxd
->pdev
->dev
;
232 mutex_lock(&wq
->wq_lock
);
233 dev_dbg(dev
, "%s removing WQ %s\n", __func__
, dev_name(wq_confdev(wq
)));
234 if (wq
->state
== IDXD_WQ_DISABLED
) {
235 mutex_unlock(&wq
->wq_lock
);
239 if (wq
->type
== IDXD_WQT_KERNEL
)
242 if (is_idxd_wq_dmaengine(wq
))
243 idxd_unregister_dma_channel(wq
);
244 else if (is_idxd_wq_cdev(wq
))
245 idxd_wq_del_cdev(wq
);
247 if (idxd_wq_refcount(wq
))
248 dev_warn(dev
, "Clients has claim on wq %d: %d\n",
249 wq
->id
, idxd_wq_refcount(wq
));
251 idxd_wq_unmap_portal(wq
);
256 idxd_wq_free_resources(wq
);
257 wq
->client_count
= 0;
258 mutex_unlock(&wq
->wq_lock
);
260 dev_info(dev
, "wq %s disabled\n", dev_name(wq_confdev(wq
)));
263 static int idxd_config_bus_remove(struct device
*dev
)
265 dev_dbg(dev
, "%s called for %s\n", __func__
, dev_name(dev
));
267 /* disable workqueue here */
268 if (is_idxd_wq_dev(dev
)) {
269 struct idxd_wq
*wq
= confdev_to_wq(dev
);
272 } else if (is_idxd_dev(dev
)) {
273 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
276 dev_dbg(dev
, "%s removing dev %s\n", __func__
,
277 dev_name(idxd_confdev(idxd
)));
278 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
279 struct idxd_wq
*wq
= idxd
->wqs
[i
];
281 if (wq
->state
== IDXD_WQ_DISABLED
)
283 dev_warn(dev
, "Active wq %d on disable %s.\n", i
,
284 dev_name(wq_confdev(wq
)));
285 device_release_driver(wq_confdev(wq
));
288 idxd_unregister_dma_device(idxd
);
289 idxd_device_disable(idxd
);
290 if (test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
291 idxd_device_reset(idxd
);
292 module_put(THIS_MODULE
);
294 dev_info(dev
, "Device %s disabled\n", dev_name(dev
));
300 static void idxd_config_bus_shutdown(struct device
*dev
)
302 dev_dbg(dev
, "%s called\n", __func__
);
305 struct bus_type dsa_bus_type
= {
307 .match
= idxd_config_bus_match
,
308 .probe
= idxd_config_bus_probe
,
309 .remove
= idxd_config_bus_remove
,
310 .shutdown
= idxd_config_bus_shutdown
,
313 static struct idxd_device_driver dsa_drv
= {
317 /* IDXD generic driver setup */
318 int idxd_register_driver(void)
320 return idxd_driver_register(&dsa_drv
);
323 void idxd_unregister_driver(void)
325 idxd_driver_unregister(&dsa_drv
);
328 /* IDXD engine attributes */
329 static ssize_t
engine_group_id_show(struct device
*dev
,
330 struct device_attribute
*attr
, char *buf
)
332 struct idxd_engine
*engine
= confdev_to_engine(dev
);
335 return sysfs_emit(buf
, "%d\n", engine
->group
->id
);
337 return sysfs_emit(buf
, "%d\n", -1);
340 static ssize_t
engine_group_id_store(struct device
*dev
,
341 struct device_attribute
*attr
,
342 const char *buf
, size_t count
)
344 struct idxd_engine
*engine
= confdev_to_engine(dev
);
345 struct idxd_device
*idxd
= engine
->idxd
;
348 struct idxd_group
*prevg
;
350 rc
= kstrtol(buf
, 10, &id
);
354 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
357 if (id
> idxd
->max_groups
- 1 || id
< -1)
362 engine
->group
->num_engines
--;
363 engine
->group
= NULL
;
368 prevg
= engine
->group
;
371 prevg
->num_engines
--;
372 engine
->group
= idxd
->groups
[id
];
373 engine
->group
->num_engines
++;
378 static struct device_attribute dev_attr_engine_group
=
379 __ATTR(group_id
, 0644, engine_group_id_show
,
380 engine_group_id_store
);
382 static struct attribute
*idxd_engine_attributes
[] = {
383 &dev_attr_engine_group
.attr
,
387 static const struct attribute_group idxd_engine_attribute_group
= {
388 .attrs
= idxd_engine_attributes
,
391 static const struct attribute_group
*idxd_engine_attribute_groups
[] = {
392 &idxd_engine_attribute_group
,
396 static void idxd_conf_engine_release(struct device
*dev
)
398 struct idxd_engine
*engine
= confdev_to_engine(dev
);
403 struct device_type idxd_engine_device_type
= {
405 .release
= idxd_conf_engine_release
,
406 .groups
= idxd_engine_attribute_groups
,
409 /* Group attributes */
411 static void idxd_set_free_tokens(struct idxd_device
*idxd
)
415 for (i
= 0, tokens
= 0; i
< idxd
->max_groups
; i
++) {
416 struct idxd_group
*g
= idxd
->groups
[i
];
418 tokens
+= g
->tokens_reserved
;
421 idxd
->nr_tokens
= idxd
->max_tokens
- tokens
;
424 static ssize_t
group_tokens_reserved_show(struct device
*dev
,
425 struct device_attribute
*attr
,
428 struct idxd_group
*group
= confdev_to_group(dev
);
430 return sysfs_emit(buf
, "%u\n", group
->tokens_reserved
);
433 static ssize_t
group_tokens_reserved_store(struct device
*dev
,
434 struct device_attribute
*attr
,
435 const char *buf
, size_t count
)
437 struct idxd_group
*group
= confdev_to_group(dev
);
438 struct idxd_device
*idxd
= group
->idxd
;
442 rc
= kstrtoul(buf
, 10, &val
);
446 if (idxd
->data
->type
== IDXD_TYPE_IAX
)
449 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
452 if (idxd
->state
== IDXD_DEV_ENABLED
)
455 if (val
> idxd
->max_tokens
)
458 if (val
> idxd
->nr_tokens
+ group
->tokens_reserved
)
461 group
->tokens_reserved
= val
;
462 idxd_set_free_tokens(idxd
);
466 static struct device_attribute dev_attr_group_tokens_reserved
=
467 __ATTR(tokens_reserved
, 0644, group_tokens_reserved_show
,
468 group_tokens_reserved_store
);
470 static ssize_t
group_tokens_allowed_show(struct device
*dev
,
471 struct device_attribute
*attr
,
474 struct idxd_group
*group
= confdev_to_group(dev
);
476 return sysfs_emit(buf
, "%u\n", group
->tokens_allowed
);
479 static ssize_t
group_tokens_allowed_store(struct device
*dev
,
480 struct device_attribute
*attr
,
481 const char *buf
, size_t count
)
483 struct idxd_group
*group
= confdev_to_group(dev
);
484 struct idxd_device
*idxd
= group
->idxd
;
488 rc
= kstrtoul(buf
, 10, &val
);
492 if (idxd
->data
->type
== IDXD_TYPE_IAX
)
495 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
498 if (idxd
->state
== IDXD_DEV_ENABLED
)
501 if (val
< 4 * group
->num_engines
||
502 val
> group
->tokens_reserved
+ idxd
->nr_tokens
)
505 group
->tokens_allowed
= val
;
509 static struct device_attribute dev_attr_group_tokens_allowed
=
510 __ATTR(tokens_allowed
, 0644, group_tokens_allowed_show
,
511 group_tokens_allowed_store
);
513 static ssize_t
group_use_token_limit_show(struct device
*dev
,
514 struct device_attribute
*attr
,
517 struct idxd_group
*group
= confdev_to_group(dev
);
519 return sysfs_emit(buf
, "%u\n", group
->use_token_limit
);
522 static ssize_t
group_use_token_limit_store(struct device
*dev
,
523 struct device_attribute
*attr
,
524 const char *buf
, size_t count
)
526 struct idxd_group
*group
= confdev_to_group(dev
);
527 struct idxd_device
*idxd
= group
->idxd
;
531 rc
= kstrtoul(buf
, 10, &val
);
535 if (idxd
->data
->type
== IDXD_TYPE_IAX
)
538 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
541 if (idxd
->state
== IDXD_DEV_ENABLED
)
544 if (idxd
->token_limit
== 0)
547 group
->use_token_limit
= !!val
;
551 static struct device_attribute dev_attr_group_use_token_limit
=
552 __ATTR(use_token_limit
, 0644, group_use_token_limit_show
,
553 group_use_token_limit_store
);
555 static ssize_t
group_engines_show(struct device
*dev
,
556 struct device_attribute
*attr
, char *buf
)
558 struct idxd_group
*group
= confdev_to_group(dev
);
560 struct idxd_device
*idxd
= group
->idxd
;
562 for (i
= 0; i
< idxd
->max_engines
; i
++) {
563 struct idxd_engine
*engine
= idxd
->engines
[i
];
568 if (engine
->group
->id
== group
->id
)
569 rc
+= sysfs_emit_at(buf
, rc
, "engine%d.%d ", idxd
->id
, engine
->id
);
575 rc
+= sysfs_emit_at(buf
, rc
, "\n");
580 static struct device_attribute dev_attr_group_engines
=
581 __ATTR(engines
, 0444, group_engines_show
, NULL
);
583 static ssize_t
group_work_queues_show(struct device
*dev
,
584 struct device_attribute
*attr
, char *buf
)
586 struct idxd_group
*group
= confdev_to_group(dev
);
588 struct idxd_device
*idxd
= group
->idxd
;
590 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
591 struct idxd_wq
*wq
= idxd
->wqs
[i
];
596 if (wq
->group
->id
== group
->id
)
597 rc
+= sysfs_emit_at(buf
, rc
, "wq%d.%d ", idxd
->id
, wq
->id
);
603 rc
+= sysfs_emit_at(buf
, rc
, "\n");
608 static struct device_attribute dev_attr_group_work_queues
=
609 __ATTR(work_queues
, 0444, group_work_queues_show
, NULL
);
611 static ssize_t
group_traffic_class_a_show(struct device
*dev
,
612 struct device_attribute
*attr
,
615 struct idxd_group
*group
= confdev_to_group(dev
);
617 return sysfs_emit(buf
, "%d\n", group
->tc_a
);
620 static ssize_t
group_traffic_class_a_store(struct device
*dev
,
621 struct device_attribute
*attr
,
622 const char *buf
, size_t count
)
624 struct idxd_group
*group
= confdev_to_group(dev
);
625 struct idxd_device
*idxd
= group
->idxd
;
629 rc
= kstrtol(buf
, 10, &val
);
633 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
636 if (idxd
->state
== IDXD_DEV_ENABLED
)
639 if (val
< 0 || val
> 7)
646 static struct device_attribute dev_attr_group_traffic_class_a
=
647 __ATTR(traffic_class_a
, 0644, group_traffic_class_a_show
,
648 group_traffic_class_a_store
);
650 static ssize_t
group_traffic_class_b_show(struct device
*dev
,
651 struct device_attribute
*attr
,
654 struct idxd_group
*group
= confdev_to_group(dev
);
656 return sysfs_emit(buf
, "%d\n", group
->tc_b
);
659 static ssize_t
group_traffic_class_b_store(struct device
*dev
,
660 struct device_attribute
*attr
,
661 const char *buf
, size_t count
)
663 struct idxd_group
*group
= confdev_to_group(dev
);
664 struct idxd_device
*idxd
= group
->idxd
;
668 rc
= kstrtol(buf
, 10, &val
);
672 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
675 if (idxd
->state
== IDXD_DEV_ENABLED
)
678 if (val
< 0 || val
> 7)
685 static struct device_attribute dev_attr_group_traffic_class_b
=
686 __ATTR(traffic_class_b
, 0644, group_traffic_class_b_show
,
687 group_traffic_class_b_store
);
689 static struct attribute
*idxd_group_attributes
[] = {
690 &dev_attr_group_work_queues
.attr
,
691 &dev_attr_group_engines
.attr
,
692 &dev_attr_group_use_token_limit
.attr
,
693 &dev_attr_group_tokens_allowed
.attr
,
694 &dev_attr_group_tokens_reserved
.attr
,
695 &dev_attr_group_traffic_class_a
.attr
,
696 &dev_attr_group_traffic_class_b
.attr
,
700 static const struct attribute_group idxd_group_attribute_group
= {
701 .attrs
= idxd_group_attributes
,
704 static const struct attribute_group
*idxd_group_attribute_groups
[] = {
705 &idxd_group_attribute_group
,
709 static void idxd_conf_group_release(struct device
*dev
)
711 struct idxd_group
*group
= confdev_to_group(dev
);
716 struct device_type idxd_group_device_type
= {
718 .release
= idxd_conf_group_release
,
719 .groups
= idxd_group_attribute_groups
,
722 /* IDXD work queue attribs */
723 static ssize_t
wq_clients_show(struct device
*dev
,
724 struct device_attribute
*attr
, char *buf
)
726 struct idxd_wq
*wq
= confdev_to_wq(dev
);
728 return sysfs_emit(buf
, "%d\n", wq
->client_count
);
731 static struct device_attribute dev_attr_wq_clients
=
732 __ATTR(clients
, 0444, wq_clients_show
, NULL
);
734 static ssize_t
wq_state_show(struct device
*dev
,
735 struct device_attribute
*attr
, char *buf
)
737 struct idxd_wq
*wq
= confdev_to_wq(dev
);
740 case IDXD_WQ_DISABLED
:
741 return sysfs_emit(buf
, "disabled\n");
742 case IDXD_WQ_ENABLED
:
743 return sysfs_emit(buf
, "enabled\n");
746 return sysfs_emit(buf
, "unknown\n");
749 static struct device_attribute dev_attr_wq_state
=
750 __ATTR(state
, 0444, wq_state_show
, NULL
);
752 static ssize_t
wq_group_id_show(struct device
*dev
,
753 struct device_attribute
*attr
, char *buf
)
755 struct idxd_wq
*wq
= confdev_to_wq(dev
);
758 return sysfs_emit(buf
, "%u\n", wq
->group
->id
);
760 return sysfs_emit(buf
, "-1\n");
763 static ssize_t
wq_group_id_store(struct device
*dev
,
764 struct device_attribute
*attr
,
765 const char *buf
, size_t count
)
767 struct idxd_wq
*wq
= confdev_to_wq(dev
);
768 struct idxd_device
*idxd
= wq
->idxd
;
771 struct idxd_group
*prevg
, *group
;
773 rc
= kstrtol(buf
, 10, &id
);
777 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
780 if (wq
->state
!= IDXD_WQ_DISABLED
)
783 if (id
> idxd
->max_groups
- 1 || id
< -1)
788 wq
->group
->num_wqs
--;
794 group
= idxd
->groups
[id
];
804 static struct device_attribute dev_attr_wq_group_id
=
805 __ATTR(group_id
, 0644, wq_group_id_show
, wq_group_id_store
);
807 static ssize_t
wq_mode_show(struct device
*dev
, struct device_attribute
*attr
,
810 struct idxd_wq
*wq
= confdev_to_wq(dev
);
812 return sysfs_emit(buf
, "%s\n", wq_dedicated(wq
) ? "dedicated" : "shared");
815 static ssize_t
wq_mode_store(struct device
*dev
,
816 struct device_attribute
*attr
, const char *buf
,
819 struct idxd_wq
*wq
= confdev_to_wq(dev
);
820 struct idxd_device
*idxd
= wq
->idxd
;
822 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
825 if (wq
->state
!= IDXD_WQ_DISABLED
)
828 if (sysfs_streq(buf
, "dedicated")) {
829 set_bit(WQ_FLAG_DEDICATED
, &wq
->flags
);
831 } else if (sysfs_streq(buf
, "shared") && device_swq_supported(idxd
)) {
832 clear_bit(WQ_FLAG_DEDICATED
, &wq
->flags
);
840 static struct device_attribute dev_attr_wq_mode
=
841 __ATTR(mode
, 0644, wq_mode_show
, wq_mode_store
);
843 static ssize_t
wq_size_show(struct device
*dev
, struct device_attribute
*attr
,
846 struct idxd_wq
*wq
= confdev_to_wq(dev
);
848 return sysfs_emit(buf
, "%u\n", wq
->size
);
851 static int total_claimed_wq_size(struct idxd_device
*idxd
)
856 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
857 struct idxd_wq
*wq
= idxd
->wqs
[i
];
865 static ssize_t
wq_size_store(struct device
*dev
,
866 struct device_attribute
*attr
, const char *buf
,
869 struct idxd_wq
*wq
= confdev_to_wq(dev
);
871 struct idxd_device
*idxd
= wq
->idxd
;
874 rc
= kstrtoul(buf
, 10, &size
);
878 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
881 if (idxd
->state
== IDXD_DEV_ENABLED
)
884 if (size
+ total_claimed_wq_size(idxd
) - wq
->size
> idxd
->max_wq_size
)
891 static struct device_attribute dev_attr_wq_size
=
892 __ATTR(size
, 0644, wq_size_show
, wq_size_store
);
894 static ssize_t
wq_priority_show(struct device
*dev
,
895 struct device_attribute
*attr
, char *buf
)
897 struct idxd_wq
*wq
= confdev_to_wq(dev
);
899 return sysfs_emit(buf
, "%u\n", wq
->priority
);
902 static ssize_t
wq_priority_store(struct device
*dev
,
903 struct device_attribute
*attr
,
904 const char *buf
, size_t count
)
906 struct idxd_wq
*wq
= confdev_to_wq(dev
);
908 struct idxd_device
*idxd
= wq
->idxd
;
911 rc
= kstrtoul(buf
, 10, &prio
);
915 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
918 if (wq
->state
!= IDXD_WQ_DISABLED
)
921 if (prio
> IDXD_MAX_PRIORITY
)
928 static struct device_attribute dev_attr_wq_priority
=
929 __ATTR(priority
, 0644, wq_priority_show
, wq_priority_store
);
931 static ssize_t
wq_block_on_fault_show(struct device
*dev
,
932 struct device_attribute
*attr
, char *buf
)
934 struct idxd_wq
*wq
= confdev_to_wq(dev
);
936 return sysfs_emit(buf
, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT
, &wq
->flags
));
939 static ssize_t
wq_block_on_fault_store(struct device
*dev
,
940 struct device_attribute
*attr
,
941 const char *buf
, size_t count
)
943 struct idxd_wq
*wq
= confdev_to_wq(dev
);
944 struct idxd_device
*idxd
= wq
->idxd
;
948 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
951 if (wq
->state
!= IDXD_WQ_DISABLED
)
954 rc
= kstrtobool(buf
, &bof
);
959 set_bit(WQ_FLAG_BLOCK_ON_FAULT
, &wq
->flags
);
961 clear_bit(WQ_FLAG_BLOCK_ON_FAULT
, &wq
->flags
);
966 static struct device_attribute dev_attr_wq_block_on_fault
=
967 __ATTR(block_on_fault
, 0644, wq_block_on_fault_show
,
968 wq_block_on_fault_store
);
970 static ssize_t
wq_threshold_show(struct device
*dev
,
971 struct device_attribute
*attr
, char *buf
)
973 struct idxd_wq
*wq
= confdev_to_wq(dev
);
975 return sysfs_emit(buf
, "%u\n", wq
->threshold
);
978 static ssize_t
wq_threshold_store(struct device
*dev
,
979 struct device_attribute
*attr
,
980 const char *buf
, size_t count
)
982 struct idxd_wq
*wq
= confdev_to_wq(dev
);
983 struct idxd_device
*idxd
= wq
->idxd
;
987 rc
= kstrtouint(buf
, 0, &val
);
991 if (val
> wq
->size
|| val
<= 0)
994 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
997 if (wq
->state
!= IDXD_WQ_DISABLED
)
1000 if (test_bit(WQ_FLAG_DEDICATED
, &wq
->flags
))
1003 wq
->threshold
= val
;
1008 static struct device_attribute dev_attr_wq_threshold
=
1009 __ATTR(threshold
, 0644, wq_threshold_show
, wq_threshold_store
);
1011 static ssize_t
wq_type_show(struct device
*dev
,
1012 struct device_attribute
*attr
, char *buf
)
1014 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1017 case IDXD_WQT_KERNEL
:
1018 return sysfs_emit(buf
, "%s\n", idxd_wq_type_names
[IDXD_WQT_KERNEL
]);
1020 return sysfs_emit(buf
, "%s\n", idxd_wq_type_names
[IDXD_WQT_USER
]);
1023 return sysfs_emit(buf
, "%s\n", idxd_wq_type_names
[IDXD_WQT_NONE
]);
1029 static ssize_t
wq_type_store(struct device
*dev
,
1030 struct device_attribute
*attr
, const char *buf
,
1033 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1034 enum idxd_wq_type old_type
;
1036 if (wq
->state
!= IDXD_WQ_DISABLED
)
1039 old_type
= wq
->type
;
1040 if (sysfs_streq(buf
, idxd_wq_type_names
[IDXD_WQT_NONE
]))
1041 wq
->type
= IDXD_WQT_NONE
;
1042 else if (sysfs_streq(buf
, idxd_wq_type_names
[IDXD_WQT_KERNEL
]))
1043 wq
->type
= IDXD_WQT_KERNEL
;
1044 else if (sysfs_streq(buf
, idxd_wq_type_names
[IDXD_WQT_USER
]))
1045 wq
->type
= IDXD_WQT_USER
;
1049 /* If we are changing queue type, clear the name */
1050 if (wq
->type
!= old_type
)
1051 memset(wq
->name
, 0, WQ_NAME_SIZE
+ 1);
1056 static struct device_attribute dev_attr_wq_type
=
1057 __ATTR(type
, 0644, wq_type_show
, wq_type_store
);
1059 static ssize_t
wq_name_show(struct device
*dev
,
1060 struct device_attribute
*attr
, char *buf
)
1062 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1064 return sysfs_emit(buf
, "%s\n", wq
->name
);
1067 static ssize_t
wq_name_store(struct device
*dev
,
1068 struct device_attribute
*attr
, const char *buf
,
1071 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1073 if (wq
->state
!= IDXD_WQ_DISABLED
)
1076 if (strlen(buf
) > WQ_NAME_SIZE
|| strlen(buf
) == 0)
1080 * This is temporarily placed here until we have SVM support for
1083 if (wq
->type
== IDXD_WQT_KERNEL
&& device_pasid_enabled(wq
->idxd
))
1086 memset(wq
->name
, 0, WQ_NAME_SIZE
+ 1);
1087 strncpy(wq
->name
, buf
, WQ_NAME_SIZE
);
1088 strreplace(wq
->name
, '\n', '\0');
1092 static struct device_attribute dev_attr_wq_name
=
1093 __ATTR(name
, 0644, wq_name_show
, wq_name_store
);
1095 static ssize_t
wq_cdev_minor_show(struct device
*dev
,
1096 struct device_attribute
*attr
, char *buf
)
1098 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1101 mutex_lock(&wq
->wq_lock
);
1103 minor
= wq
->idxd_cdev
->minor
;
1104 mutex_unlock(&wq
->wq_lock
);
1108 return sysfs_emit(buf
, "%d\n", minor
);
1111 static struct device_attribute dev_attr_wq_cdev_minor
=
1112 __ATTR(cdev_minor
, 0444, wq_cdev_minor_show
, NULL
);
1114 static int __get_sysfs_u64(const char *buf
, u64
*val
)
1118 rc
= kstrtou64(buf
, 0, val
);
1125 *val
= roundup_pow_of_two(*val
);
1129 static ssize_t
wq_max_transfer_size_show(struct device
*dev
, struct device_attribute
*attr
,
1132 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1134 return sysfs_emit(buf
, "%llu\n", wq
->max_xfer_bytes
);
1137 static ssize_t
wq_max_transfer_size_store(struct device
*dev
, struct device_attribute
*attr
,
1138 const char *buf
, size_t count
)
1140 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1141 struct idxd_device
*idxd
= wq
->idxd
;
1145 if (wq
->state
!= IDXD_WQ_DISABLED
)
1148 rc
= __get_sysfs_u64(buf
, &xfer_size
);
1152 if (xfer_size
> idxd
->max_xfer_bytes
)
1155 wq
->max_xfer_bytes
= xfer_size
;
1160 static struct device_attribute dev_attr_wq_max_transfer_size
=
1161 __ATTR(max_transfer_size
, 0644,
1162 wq_max_transfer_size_show
, wq_max_transfer_size_store
);
1164 static ssize_t
wq_max_batch_size_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1166 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1168 return sysfs_emit(buf
, "%u\n", wq
->max_batch_size
);
1171 static ssize_t
wq_max_batch_size_store(struct device
*dev
, struct device_attribute
*attr
,
1172 const char *buf
, size_t count
)
1174 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1175 struct idxd_device
*idxd
= wq
->idxd
;
1179 if (wq
->state
!= IDXD_WQ_DISABLED
)
1182 rc
= __get_sysfs_u64(buf
, &batch_size
);
1186 if (batch_size
> idxd
->max_batch_size
)
1189 wq
->max_batch_size
= (u32
)batch_size
;
1194 static struct device_attribute dev_attr_wq_max_batch_size
=
1195 __ATTR(max_batch_size
, 0644, wq_max_batch_size_show
, wq_max_batch_size_store
);
1197 static ssize_t
wq_ats_disable_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1199 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1201 return sysfs_emit(buf
, "%u\n", wq
->ats_dis
);
1204 static ssize_t
wq_ats_disable_store(struct device
*dev
, struct device_attribute
*attr
,
1205 const char *buf
, size_t count
)
1207 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1208 struct idxd_device
*idxd
= wq
->idxd
;
1212 if (wq
->state
!= IDXD_WQ_DISABLED
)
1215 if (!idxd
->hw
.wq_cap
.wq_ats_support
)
1218 rc
= kstrtobool(buf
, &ats_dis
);
1222 wq
->ats_dis
= ats_dis
;
1227 static struct device_attribute dev_attr_wq_ats_disable
=
1228 __ATTR(ats_disable
, 0644, wq_ats_disable_show
, wq_ats_disable_store
);
1230 static ssize_t
wq_occupancy_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1232 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1233 struct idxd_device
*idxd
= wq
->idxd
;
1236 if (!idxd
->hw
.wq_cap
.occupancy
)
1239 offset
= WQCFG_OFFSET(idxd
, wq
->id
, WQCFG_OCCUP_IDX
);
1240 occup
= ioread32(idxd
->reg_base
+ offset
) & WQCFG_OCCUP_MASK
;
1242 return sysfs_emit(buf
, "%u\n", occup
);
1245 static struct device_attribute dev_attr_wq_occupancy
=
1246 __ATTR(occupancy
, 0444, wq_occupancy_show
, NULL
);
1248 static struct attribute
*idxd_wq_attributes
[] = {
1249 &dev_attr_wq_clients
.attr
,
1250 &dev_attr_wq_state
.attr
,
1251 &dev_attr_wq_group_id
.attr
,
1252 &dev_attr_wq_mode
.attr
,
1253 &dev_attr_wq_size
.attr
,
1254 &dev_attr_wq_priority
.attr
,
1255 &dev_attr_wq_block_on_fault
.attr
,
1256 &dev_attr_wq_threshold
.attr
,
1257 &dev_attr_wq_type
.attr
,
1258 &dev_attr_wq_name
.attr
,
1259 &dev_attr_wq_cdev_minor
.attr
,
1260 &dev_attr_wq_max_transfer_size
.attr
,
1261 &dev_attr_wq_max_batch_size
.attr
,
1262 &dev_attr_wq_ats_disable
.attr
,
1263 &dev_attr_wq_occupancy
.attr
,
1267 static const struct attribute_group idxd_wq_attribute_group
= {
1268 .attrs
= idxd_wq_attributes
,
1271 static const struct attribute_group
*idxd_wq_attribute_groups
[] = {
1272 &idxd_wq_attribute_group
,
1276 static void idxd_conf_wq_release(struct device
*dev
)
1278 struct idxd_wq
*wq
= confdev_to_wq(dev
);
1284 struct device_type idxd_wq_device_type
= {
1286 .release
= idxd_conf_wq_release
,
1287 .groups
= idxd_wq_attribute_groups
,
1290 /* IDXD device attribs */
1291 static ssize_t
version_show(struct device
*dev
, struct device_attribute
*attr
,
1294 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1296 return sysfs_emit(buf
, "%#x\n", idxd
->hw
.version
);
1298 static DEVICE_ATTR_RO(version
);
1300 static ssize_t
max_work_queues_size_show(struct device
*dev
,
1301 struct device_attribute
*attr
,
1304 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1306 return sysfs_emit(buf
, "%u\n", idxd
->max_wq_size
);
1308 static DEVICE_ATTR_RO(max_work_queues_size
);
1310 static ssize_t
max_groups_show(struct device
*dev
,
1311 struct device_attribute
*attr
, char *buf
)
1313 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1315 return sysfs_emit(buf
, "%u\n", idxd
->max_groups
);
1317 static DEVICE_ATTR_RO(max_groups
);
1319 static ssize_t
max_work_queues_show(struct device
*dev
,
1320 struct device_attribute
*attr
, char *buf
)
1322 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1324 return sysfs_emit(buf
, "%u\n", idxd
->max_wqs
);
1326 static DEVICE_ATTR_RO(max_work_queues
);
1328 static ssize_t
max_engines_show(struct device
*dev
,
1329 struct device_attribute
*attr
, char *buf
)
1331 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1333 return sysfs_emit(buf
, "%u\n", idxd
->max_engines
);
1335 static DEVICE_ATTR_RO(max_engines
);
1337 static ssize_t
numa_node_show(struct device
*dev
,
1338 struct device_attribute
*attr
, char *buf
)
1340 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1342 return sysfs_emit(buf
, "%d\n", dev_to_node(&idxd
->pdev
->dev
));
1344 static DEVICE_ATTR_RO(numa_node
);
1346 static ssize_t
max_batch_size_show(struct device
*dev
,
1347 struct device_attribute
*attr
, char *buf
)
1349 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1351 return sysfs_emit(buf
, "%u\n", idxd
->max_batch_size
);
1353 static DEVICE_ATTR_RO(max_batch_size
);
1355 static ssize_t
max_transfer_size_show(struct device
*dev
,
1356 struct device_attribute
*attr
,
1359 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1361 return sysfs_emit(buf
, "%llu\n", idxd
->max_xfer_bytes
);
1363 static DEVICE_ATTR_RO(max_transfer_size
);
1365 static ssize_t
op_cap_show(struct device
*dev
,
1366 struct device_attribute
*attr
, char *buf
)
1368 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1371 for (i
= 0; i
< 4; i
++)
1372 rc
+= sysfs_emit_at(buf
, rc
, "%#llx ", idxd
->hw
.opcap
.bits
[i
]);
1375 rc
+= sysfs_emit_at(buf
, rc
, "\n");
1378 static DEVICE_ATTR_RO(op_cap
);
1380 static ssize_t
gen_cap_show(struct device
*dev
,
1381 struct device_attribute
*attr
, char *buf
)
1383 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1385 return sysfs_emit(buf
, "%#llx\n", idxd
->hw
.gen_cap
.bits
);
1387 static DEVICE_ATTR_RO(gen_cap
);
1389 static ssize_t
configurable_show(struct device
*dev
,
1390 struct device_attribute
*attr
, char *buf
)
1392 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1394 return sysfs_emit(buf
, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
));
1396 static DEVICE_ATTR_RO(configurable
);
1398 static ssize_t
clients_show(struct device
*dev
,
1399 struct device_attribute
*attr
, char *buf
)
1401 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1402 unsigned long flags
;
1405 spin_lock_irqsave(&idxd
->dev_lock
, flags
);
1406 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
1407 struct idxd_wq
*wq
= idxd
->wqs
[i
];
1409 count
+= wq
->client_count
;
1411 spin_unlock_irqrestore(&idxd
->dev_lock
, flags
);
1413 return sysfs_emit(buf
, "%d\n", count
);
1415 static DEVICE_ATTR_RO(clients
);
1417 static ssize_t
pasid_enabled_show(struct device
*dev
,
1418 struct device_attribute
*attr
, char *buf
)
1420 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1422 return sysfs_emit(buf
, "%u\n", device_pasid_enabled(idxd
));
1424 static DEVICE_ATTR_RO(pasid_enabled
);
1426 static ssize_t
state_show(struct device
*dev
,
1427 struct device_attribute
*attr
, char *buf
)
1429 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1431 switch (idxd
->state
) {
1432 case IDXD_DEV_DISABLED
:
1433 case IDXD_DEV_CONF_READY
:
1434 return sysfs_emit(buf
, "disabled\n");
1435 case IDXD_DEV_ENABLED
:
1436 return sysfs_emit(buf
, "enabled\n");
1437 case IDXD_DEV_HALTED
:
1438 return sysfs_emit(buf
, "halted\n");
1441 return sysfs_emit(buf
, "unknown\n");
1443 static DEVICE_ATTR_RO(state
);
/*
 * Dump the device's software error record: the four 64-bit words of
 * sw_err.bits, space-separated, in one line.  The snapshot is taken
 * under dev_lock so the words belong to the same error record.
 */
static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	/* Overwrite the trailing space left by the loop with a newline. */
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);
1462 static ssize_t
max_tokens_show(struct device
*dev
,
1463 struct device_attribute
*attr
, char *buf
)
1465 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1467 return sysfs_emit(buf
, "%u\n", idxd
->max_tokens
);
1469 static DEVICE_ATTR_RO(max_tokens
);
/* Read-only half of the token_limit attribute; see token_limit_store. */
static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->token_limit);
}
/*
 * Set the device token limit from userspace.  The write is rejected
 * unless:
 *  - the buffer parses as a base-10 unsigned value,
 *  - the device is not currently enabled,
 *  - the device is software-configurable,
 *  - the hardware advertises token-limit support (group_cap.token_limit),
 *  - the value does not exceed group_cap.total_tokens.
 */
static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	/* Not changeable while the device is live. */
	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	/* Hardware must support a token limit at all... */
	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	/* ...and the limit cannot exceed the tokens the device has. */
	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);
1508 static ssize_t
cdev_major_show(struct device
*dev
,
1509 struct device_attribute
*attr
, char *buf
)
1511 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
1513 return sysfs_emit(buf
, "%u\n", idxd
->major
);
1515 static DEVICE_ATTR_RO(cdev_major
);
/* Report the last device command status value, in hex. */
static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);
/*
 * sysfs attributes exported on the idxd device's configuration device,
 * collected into a single attribute group and hooked up through the
 * device type's ->groups below.  Both arrays are NULL-terminated as
 * required by the driver core.
 */
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
/*
 * Release callback for the idxd configuration device, invoked by the
 * driver core when the last reference is dropped.  Frees the arrays of
 * sub-objects, returns the device id to the ida, and finally frees the
 * idxd structure itself.
 */
static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}
/*
 * Device types for the two hardware flavors (DSA and IAX).  Both share
 * the same release handler and sysfs attribute groups; only the type
 * name differs.
 */
struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};
1583 static int idxd_register_engine_devices(struct idxd_device
*idxd
)
1585 struct idxd_engine
*engine
;
1588 for (i
= 0; i
< idxd
->max_engines
; i
++) {
1589 engine
= idxd
->engines
[i
];
1590 rc
= device_add(engine_confdev(engine
));
1599 for (; i
< idxd
->max_engines
; i
++) {
1600 engine
= idxd
->engines
[i
];
1601 put_device(engine_confdev(engine
));
1605 engine
= idxd
->engines
[j
];
1606 device_unregister(engine_confdev(engine
));
1611 static int idxd_register_group_devices(struct idxd_device
*idxd
)
1613 struct idxd_group
*group
;
1616 for (i
= 0; i
< idxd
->max_groups
; i
++) {
1617 group
= idxd
->groups
[i
];
1618 rc
= device_add(group_confdev(group
));
1627 for (; i
< idxd
->max_groups
; i
++) {
1628 group
= idxd
->groups
[i
];
1629 put_device(group_confdev(group
));
1633 group
= idxd
->groups
[j
];
1634 device_unregister(group_confdev(group
));
1639 static int idxd_register_wq_devices(struct idxd_device
*idxd
)
1644 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
1646 rc
= device_add(wq_confdev(wq
));
1655 for (; i
< idxd
->max_wqs
; i
++) {
1657 put_device(wq_confdev(wq
));
1662 device_unregister(wq_confdev(wq
));
/*
 * Register the idxd conf device and all of its sub-devices (WQs,
 * engines, groups) with the driver core.
 *
 * The error labels deliberately fall through so that each later
 * failure unwinds everything registered before it: a group failure
 * unregisters engines, then WQs, then deletes the parent conf device.
 *
 * Returns 0 on success or a negative error code.
 */
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}
/*
 * Unregister all sub-devices (WQs, engines, groups) from the driver
 * core.  The parent idxd conf device is not touched here; its
 * teardown is handled separately by the caller.
 */
void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}
/* Register the dsa bus type with the driver core; 0 on success. */
int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}
/* Tear down the dsa bus type registered by idxd_register_bus_type(). */
void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}