// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
13 static char *idxd_wq_type_names
[] = {
14 [IDXD_WQT_NONE
] = "none",
15 [IDXD_WQT_KERNEL
] = "kernel",
16 [IDXD_WQT_USER
] = "user",
/*
 * Release callback shared by all idxd configuration devices.  The embedded
 * struct device objects are freed with their parent structures, so only a
 * debug trace is emitted here.
 */
static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}
24 static struct device_type idxd_group_device_type
= {
26 .release
= idxd_conf_device_release
,
29 static struct device_type idxd_wq_device_type
= {
31 .release
= idxd_conf_device_release
,
34 static struct device_type idxd_engine_device_type
= {
36 .release
= idxd_conf_device_release
,
39 static struct device_type dsa_device_type
= {
41 .release
= idxd_conf_device_release
,
44 static struct device_type iax_device_type
= {
46 .release
= idxd_conf_device_release
,
49 static inline bool is_dsa_dev(struct device
*dev
)
51 return dev
? dev
->type
== &dsa_device_type
: false;
54 static inline bool is_iax_dev(struct device
*dev
)
56 return dev
? dev
->type
== &iax_device_type
: false;
59 static inline bool is_idxd_dev(struct device
*dev
)
61 return is_dsa_dev(dev
) || is_iax_dev(dev
);
64 static inline bool is_idxd_wq_dev(struct device
*dev
)
66 return dev
? dev
->type
== &idxd_wq_device_type
: false;
69 static inline bool is_idxd_wq_dmaengine(struct idxd_wq
*wq
)
71 if (wq
->type
== IDXD_WQT_KERNEL
&&
72 strcmp(wq
->name
, "dmaengine") == 0)
77 static inline bool is_idxd_wq_cdev(struct idxd_wq
*wq
)
79 return wq
->type
== IDXD_WQT_USER
;
82 static int idxd_config_bus_match(struct device
*dev
,
83 struct device_driver
*drv
)
87 if (is_idxd_dev(dev
)) {
88 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
90 if (idxd
->state
!= IDXD_DEV_CONF_READY
)
93 } else if (is_idxd_wq_dev(dev
)) {
94 struct idxd_wq
*wq
= confdev_to_wq(dev
);
95 struct idxd_device
*idxd
= wq
->idxd
;
97 if (idxd
->state
< IDXD_DEV_CONF_READY
)
100 if (wq
->state
!= IDXD_WQ_DISABLED
) {
101 dev_dbg(dev
, "%s not disabled\n", dev_name(dev
));
108 dev_dbg(dev
, "%s matched\n", dev_name(dev
));
113 static int idxd_config_bus_probe(struct device
*dev
)
118 dev_dbg(dev
, "%s called\n", __func__
);
120 if (is_idxd_dev(dev
)) {
121 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
123 if (idxd
->state
!= IDXD_DEV_CONF_READY
) {
124 dev_warn(dev
, "Device not ready for config\n");
128 if (!try_module_get(THIS_MODULE
))
131 /* Perform IDXD configuration and enabling */
132 spin_lock_irqsave(&idxd
->dev_lock
, flags
);
133 rc
= idxd_device_config(idxd
);
134 spin_unlock_irqrestore(&idxd
->dev_lock
, flags
);
136 module_put(THIS_MODULE
);
137 dev_warn(dev
, "Device config failed: %d\n", rc
);
142 rc
= idxd_device_enable(idxd
);
144 module_put(THIS_MODULE
);
145 dev_warn(dev
, "Device enable failed: %d\n", rc
);
149 dev_info(dev
, "Device %s enabled\n", dev_name(dev
));
151 rc
= idxd_register_dma_device(idxd
);
153 module_put(THIS_MODULE
);
154 dev_dbg(dev
, "Failed to register dmaengine device\n");
158 } else if (is_idxd_wq_dev(dev
)) {
159 struct idxd_wq
*wq
= confdev_to_wq(dev
);
160 struct idxd_device
*idxd
= wq
->idxd
;
162 mutex_lock(&wq
->wq_lock
);
164 if (idxd
->state
!= IDXD_DEV_ENABLED
) {
165 mutex_unlock(&wq
->wq_lock
);
166 dev_warn(dev
, "Enabling while device not enabled.\n");
170 if (wq
->state
!= IDXD_WQ_DISABLED
) {
171 mutex_unlock(&wq
->wq_lock
);
172 dev_warn(dev
, "WQ %d already enabled.\n", wq
->id
);
177 mutex_unlock(&wq
->wq_lock
);
178 dev_warn(dev
, "WQ not attached to group.\n");
182 if (strlen(wq
->name
) == 0) {
183 mutex_unlock(&wq
->wq_lock
);
184 dev_warn(dev
, "WQ name not set.\n");
188 /* Shared WQ checks */
190 if (!device_swq_supported(idxd
)) {
192 "PASID not enabled and shared WQ.\n");
193 mutex_unlock(&wq
->wq_lock
);
197 * Shared wq with the threshold set to 0 means the user
198 * did not set the threshold or transitioned from a
199 * dedicated wq but did not set threshold. A value
200 * of 0 would effectively disable the shared wq. The
201 * driver does not allow a value of 0 to be set for
202 * threshold via sysfs.
204 if (wq
->threshold
== 0) {
206 "Shared WQ and threshold 0.\n");
207 mutex_unlock(&wq
->wq_lock
);
212 rc
= idxd_wq_alloc_resources(wq
);
214 mutex_unlock(&wq
->wq_lock
);
215 dev_warn(dev
, "WQ resource alloc failed\n");
219 spin_lock_irqsave(&idxd
->dev_lock
, flags
);
220 rc
= idxd_device_config(idxd
);
221 spin_unlock_irqrestore(&idxd
->dev_lock
, flags
);
223 mutex_unlock(&wq
->wq_lock
);
224 dev_warn(dev
, "Writing WQ %d config failed: %d\n",
229 rc
= idxd_wq_enable(wq
);
231 mutex_unlock(&wq
->wq_lock
);
232 dev_warn(dev
, "WQ %d enabling failed: %d\n",
237 rc
= idxd_wq_map_portal(wq
);
239 dev_warn(dev
, "wq portal mapping failed: %d\n", rc
);
240 rc
= idxd_wq_disable(wq
);
242 dev_warn(dev
, "IDXD wq disable failed\n");
243 mutex_unlock(&wq
->wq_lock
);
247 wq
->client_count
= 0;
249 dev_info(dev
, "wq %s enabled\n", dev_name(&wq
->conf_dev
));
251 if (is_idxd_wq_dmaengine(wq
)) {
252 rc
= idxd_register_dma_channel(wq
);
254 dev_dbg(dev
, "DMA channel register failed\n");
255 mutex_unlock(&wq
->wq_lock
);
258 } else if (is_idxd_wq_cdev(wq
)) {
259 rc
= idxd_wq_add_cdev(wq
);
261 dev_dbg(dev
, "Cdev creation failed\n");
262 mutex_unlock(&wq
->wq_lock
);
267 mutex_unlock(&wq
->wq_lock
);
274 static void disable_wq(struct idxd_wq
*wq
)
276 struct idxd_device
*idxd
= wq
->idxd
;
277 struct device
*dev
= &idxd
->pdev
->dev
;
279 mutex_lock(&wq
->wq_lock
);
280 dev_dbg(dev
, "%s removing WQ %s\n", __func__
, dev_name(&wq
->conf_dev
));
281 if (wq
->state
== IDXD_WQ_DISABLED
) {
282 mutex_unlock(&wq
->wq_lock
);
286 if (is_idxd_wq_dmaengine(wq
))
287 idxd_unregister_dma_channel(wq
);
288 else if (is_idxd_wq_cdev(wq
))
289 idxd_wq_del_cdev(wq
);
291 if (idxd_wq_refcount(wq
))
292 dev_warn(dev
, "Clients has claim on wq %d: %d\n",
293 wq
->id
, idxd_wq_refcount(wq
));
295 idxd_wq_unmap_portal(wq
);
300 idxd_wq_free_resources(wq
);
301 wq
->client_count
= 0;
302 mutex_unlock(&wq
->wq_lock
);
304 dev_info(dev
, "wq %s disabled\n", dev_name(&wq
->conf_dev
));
307 static int idxd_config_bus_remove(struct device
*dev
)
311 dev_dbg(dev
, "%s called for %s\n", __func__
, dev_name(dev
));
313 /* disable workqueue here */
314 if (is_idxd_wq_dev(dev
)) {
315 struct idxd_wq
*wq
= confdev_to_wq(dev
);
318 } else if (is_idxd_dev(dev
)) {
319 struct idxd_device
*idxd
= confdev_to_idxd(dev
);
322 dev_dbg(dev
, "%s removing dev %s\n", __func__
,
323 dev_name(&idxd
->conf_dev
));
324 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
325 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
327 if (wq
->state
== IDXD_WQ_DISABLED
)
329 dev_warn(dev
, "Active wq %d on disable %s.\n", i
,
330 dev_name(&idxd
->conf_dev
));
331 device_release_driver(&wq
->conf_dev
);
334 idxd_unregister_dma_device(idxd
);
335 rc
= idxd_device_disable(idxd
);
336 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
337 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
339 mutex_lock(&wq
->wq_lock
);
340 idxd_wq_disable_cleanup(wq
);
341 mutex_unlock(&wq
->wq_lock
);
343 module_put(THIS_MODULE
);
345 dev_warn(dev
, "Device disable failed\n");
347 dev_info(dev
, "Device %s disabled\n", dev_name(dev
));
/* Bus shutdown callback — nothing to quiesce beyond a debug trace. */
static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}
359 struct bus_type dsa_bus_type
= {
361 .match
= idxd_config_bus_match
,
362 .probe
= idxd_config_bus_probe
,
363 .remove
= idxd_config_bus_remove
,
364 .shutdown
= idxd_config_bus_shutdown
,
367 struct bus_type iax_bus_type
= {
369 .match
= idxd_config_bus_match
,
370 .probe
= idxd_config_bus_probe
,
371 .remove
= idxd_config_bus_remove
,
372 .shutdown
= idxd_config_bus_shutdown
,
375 static struct bus_type
*idxd_bus_types
[] = {
380 static struct idxd_device_driver dsa_drv
= {
383 .bus
= &dsa_bus_type
,
384 .owner
= THIS_MODULE
,
385 .mod_name
= KBUILD_MODNAME
,
389 static struct idxd_device_driver iax_drv
= {
392 .bus
= &iax_bus_type
,
393 .owner
= THIS_MODULE
,
394 .mod_name
= KBUILD_MODNAME
,
398 static struct idxd_device_driver
*idxd_drvs
[] = {
403 struct bus_type
*idxd_get_bus_type(struct idxd_device
*idxd
)
405 return idxd_bus_types
[idxd
->type
];
408 static struct device_type
*idxd_get_device_type(struct idxd_device
*idxd
)
410 if (idxd
->type
== IDXD_TYPE_DSA
)
411 return &dsa_device_type
;
412 else if (idxd
->type
== IDXD_TYPE_IAX
)
413 return &iax_device_type
;
418 /* IDXD generic driver setup */
419 int idxd_register_driver(void)
423 for (i
= 0; i
< IDXD_TYPE_MAX
; i
++) {
424 rc
= driver_register(&idxd_drvs
[i
]->drv
);
433 driver_unregister(&idxd_drvs
[i
]->drv
);
437 void idxd_unregister_driver(void)
441 for (i
= 0; i
< IDXD_TYPE_MAX
; i
++)
442 driver_unregister(&idxd_drvs
[i
]->drv
);
445 /* IDXD engine attributes */
446 static ssize_t
engine_group_id_show(struct device
*dev
,
447 struct device_attribute
*attr
, char *buf
)
449 struct idxd_engine
*engine
=
450 container_of(dev
, struct idxd_engine
, conf_dev
);
453 return sprintf(buf
, "%d\n", engine
->group
->id
);
455 return sprintf(buf
, "%d\n", -1);
458 static ssize_t
engine_group_id_store(struct device
*dev
,
459 struct device_attribute
*attr
,
460 const char *buf
, size_t count
)
462 struct idxd_engine
*engine
=
463 container_of(dev
, struct idxd_engine
, conf_dev
);
464 struct idxd_device
*idxd
= engine
->idxd
;
467 struct idxd_group
*prevg
;
469 rc
= kstrtol(buf
, 10, &id
);
473 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
476 if (id
> idxd
->max_groups
- 1 || id
< -1)
481 engine
->group
->num_engines
--;
482 engine
->group
= NULL
;
487 prevg
= engine
->group
;
490 prevg
->num_engines
--;
491 engine
->group
= &idxd
->groups
[id
];
492 engine
->group
->num_engines
++;
497 static struct device_attribute dev_attr_engine_group
=
498 __ATTR(group_id
, 0644, engine_group_id_show
,
499 engine_group_id_store
);
501 static struct attribute
*idxd_engine_attributes
[] = {
502 &dev_attr_engine_group
.attr
,
506 static const struct attribute_group idxd_engine_attribute_group
= {
507 .attrs
= idxd_engine_attributes
,
510 static const struct attribute_group
*idxd_engine_attribute_groups
[] = {
511 &idxd_engine_attribute_group
,
515 /* Group attributes */
517 static void idxd_set_free_tokens(struct idxd_device
*idxd
)
521 for (i
= 0, tokens
= 0; i
< idxd
->max_groups
; i
++) {
522 struct idxd_group
*g
= &idxd
->groups
[i
];
524 tokens
+= g
->tokens_reserved
;
527 idxd
->nr_tokens
= idxd
->max_tokens
- tokens
;
530 static ssize_t
group_tokens_reserved_show(struct device
*dev
,
531 struct device_attribute
*attr
,
534 struct idxd_group
*group
=
535 container_of(dev
, struct idxd_group
, conf_dev
);
537 return sprintf(buf
, "%u\n", group
->tokens_reserved
);
540 static ssize_t
group_tokens_reserved_store(struct device
*dev
,
541 struct device_attribute
*attr
,
542 const char *buf
, size_t count
)
544 struct idxd_group
*group
=
545 container_of(dev
, struct idxd_group
, conf_dev
);
546 struct idxd_device
*idxd
= group
->idxd
;
550 rc
= kstrtoul(buf
, 10, &val
);
554 if (idxd
->type
== IDXD_TYPE_IAX
)
557 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
560 if (idxd
->state
== IDXD_DEV_ENABLED
)
563 if (val
> idxd
->max_tokens
)
566 if (val
> idxd
->nr_tokens
+ group
->tokens_reserved
)
569 group
->tokens_reserved
= val
;
570 idxd_set_free_tokens(idxd
);
574 static struct device_attribute dev_attr_group_tokens_reserved
=
575 __ATTR(tokens_reserved
, 0644, group_tokens_reserved_show
,
576 group_tokens_reserved_store
);
578 static ssize_t
group_tokens_allowed_show(struct device
*dev
,
579 struct device_attribute
*attr
,
582 struct idxd_group
*group
=
583 container_of(dev
, struct idxd_group
, conf_dev
);
585 return sprintf(buf
, "%u\n", group
->tokens_allowed
);
588 static ssize_t
group_tokens_allowed_store(struct device
*dev
,
589 struct device_attribute
*attr
,
590 const char *buf
, size_t count
)
592 struct idxd_group
*group
=
593 container_of(dev
, struct idxd_group
, conf_dev
);
594 struct idxd_device
*idxd
= group
->idxd
;
598 rc
= kstrtoul(buf
, 10, &val
);
602 if (idxd
->type
== IDXD_TYPE_IAX
)
605 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
608 if (idxd
->state
== IDXD_DEV_ENABLED
)
611 if (val
< 4 * group
->num_engines
||
612 val
> group
->tokens_reserved
+ idxd
->nr_tokens
)
615 group
->tokens_allowed
= val
;
619 static struct device_attribute dev_attr_group_tokens_allowed
=
620 __ATTR(tokens_allowed
, 0644, group_tokens_allowed_show
,
621 group_tokens_allowed_store
);
623 static ssize_t
group_use_token_limit_show(struct device
*dev
,
624 struct device_attribute
*attr
,
627 struct idxd_group
*group
=
628 container_of(dev
, struct idxd_group
, conf_dev
);
630 return sprintf(buf
, "%u\n", group
->use_token_limit
);
633 static ssize_t
group_use_token_limit_store(struct device
*dev
,
634 struct device_attribute
*attr
,
635 const char *buf
, size_t count
)
637 struct idxd_group
*group
=
638 container_of(dev
, struct idxd_group
, conf_dev
);
639 struct idxd_device
*idxd
= group
->idxd
;
643 rc
= kstrtoul(buf
, 10, &val
);
647 if (idxd
->type
== IDXD_TYPE_IAX
)
650 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
653 if (idxd
->state
== IDXD_DEV_ENABLED
)
656 if (idxd
->token_limit
== 0)
659 group
->use_token_limit
= !!val
;
663 static struct device_attribute dev_attr_group_use_token_limit
=
664 __ATTR(use_token_limit
, 0644, group_use_token_limit_show
,
665 group_use_token_limit_store
);
667 static ssize_t
group_engines_show(struct device
*dev
,
668 struct device_attribute
*attr
, char *buf
)
670 struct idxd_group
*group
=
671 container_of(dev
, struct idxd_group
, conf_dev
);
674 struct idxd_device
*idxd
= group
->idxd
;
676 for (i
= 0; i
< idxd
->max_engines
; i
++) {
677 struct idxd_engine
*engine
= &idxd
->engines
[i
];
682 if (engine
->group
->id
== group
->id
)
683 rc
+= sprintf(tmp
+ rc
, "engine%d.%d ",
684 idxd
->id
, engine
->id
);
688 rc
+= sprintf(tmp
+ rc
, "\n");
693 static struct device_attribute dev_attr_group_engines
=
694 __ATTR(engines
, 0444, group_engines_show
, NULL
);
696 static ssize_t
group_work_queues_show(struct device
*dev
,
697 struct device_attribute
*attr
, char *buf
)
699 struct idxd_group
*group
=
700 container_of(dev
, struct idxd_group
, conf_dev
);
703 struct idxd_device
*idxd
= group
->idxd
;
705 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
706 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
711 if (wq
->group
->id
== group
->id
)
712 rc
+= sprintf(tmp
+ rc
, "wq%d.%d ",
717 rc
+= sprintf(tmp
+ rc
, "\n");
722 static struct device_attribute dev_attr_group_work_queues
=
723 __ATTR(work_queues
, 0444, group_work_queues_show
, NULL
);
725 static ssize_t
group_traffic_class_a_show(struct device
*dev
,
726 struct device_attribute
*attr
,
729 struct idxd_group
*group
=
730 container_of(dev
, struct idxd_group
, conf_dev
);
732 return sprintf(buf
, "%d\n", group
->tc_a
);
735 static ssize_t
group_traffic_class_a_store(struct device
*dev
,
736 struct device_attribute
*attr
,
737 const char *buf
, size_t count
)
739 struct idxd_group
*group
=
740 container_of(dev
, struct idxd_group
, conf_dev
);
741 struct idxd_device
*idxd
= group
->idxd
;
745 rc
= kstrtol(buf
, 10, &val
);
749 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
752 if (idxd
->state
== IDXD_DEV_ENABLED
)
755 if (val
< 0 || val
> 7)
762 static struct device_attribute dev_attr_group_traffic_class_a
=
763 __ATTR(traffic_class_a
, 0644, group_traffic_class_a_show
,
764 group_traffic_class_a_store
);
766 static ssize_t
group_traffic_class_b_show(struct device
*dev
,
767 struct device_attribute
*attr
,
770 struct idxd_group
*group
=
771 container_of(dev
, struct idxd_group
, conf_dev
);
773 return sprintf(buf
, "%d\n", group
->tc_b
);
776 static ssize_t
group_traffic_class_b_store(struct device
*dev
,
777 struct device_attribute
*attr
,
778 const char *buf
, size_t count
)
780 struct idxd_group
*group
=
781 container_of(dev
, struct idxd_group
, conf_dev
);
782 struct idxd_device
*idxd
= group
->idxd
;
786 rc
= kstrtol(buf
, 10, &val
);
790 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
793 if (idxd
->state
== IDXD_DEV_ENABLED
)
796 if (val
< 0 || val
> 7)
803 static struct device_attribute dev_attr_group_traffic_class_b
=
804 __ATTR(traffic_class_b
, 0644, group_traffic_class_b_show
,
805 group_traffic_class_b_store
);
807 static struct attribute
*idxd_group_attributes
[] = {
808 &dev_attr_group_work_queues
.attr
,
809 &dev_attr_group_engines
.attr
,
810 &dev_attr_group_use_token_limit
.attr
,
811 &dev_attr_group_tokens_allowed
.attr
,
812 &dev_attr_group_tokens_reserved
.attr
,
813 &dev_attr_group_traffic_class_a
.attr
,
814 &dev_attr_group_traffic_class_b
.attr
,
818 static const struct attribute_group idxd_group_attribute_group
= {
819 .attrs
= idxd_group_attributes
,
822 static const struct attribute_group
*idxd_group_attribute_groups
[] = {
823 &idxd_group_attribute_group
,
827 /* IDXD work queue attribs */
828 static ssize_t
wq_clients_show(struct device
*dev
,
829 struct device_attribute
*attr
, char *buf
)
831 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
833 return sprintf(buf
, "%d\n", wq
->client_count
);
836 static struct device_attribute dev_attr_wq_clients
=
837 __ATTR(clients
, 0444, wq_clients_show
, NULL
);
839 static ssize_t
wq_state_show(struct device
*dev
,
840 struct device_attribute
*attr
, char *buf
)
842 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
845 case IDXD_WQ_DISABLED
:
846 return sprintf(buf
, "disabled\n");
847 case IDXD_WQ_ENABLED
:
848 return sprintf(buf
, "enabled\n");
851 return sprintf(buf
, "unknown\n");
854 static struct device_attribute dev_attr_wq_state
=
855 __ATTR(state
, 0444, wq_state_show
, NULL
);
857 static ssize_t
wq_group_id_show(struct device
*dev
,
858 struct device_attribute
*attr
, char *buf
)
860 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
863 return sprintf(buf
, "%u\n", wq
->group
->id
);
865 return sprintf(buf
, "-1\n");
868 static ssize_t
wq_group_id_store(struct device
*dev
,
869 struct device_attribute
*attr
,
870 const char *buf
, size_t count
)
872 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
873 struct idxd_device
*idxd
= wq
->idxd
;
876 struct idxd_group
*prevg
, *group
;
878 rc
= kstrtol(buf
, 10, &id
);
882 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
885 if (wq
->state
!= IDXD_WQ_DISABLED
)
888 if (id
> idxd
->max_groups
- 1 || id
< -1)
893 wq
->group
->num_wqs
--;
899 group
= &idxd
->groups
[id
];
909 static struct device_attribute dev_attr_wq_group_id
=
910 __ATTR(group_id
, 0644, wq_group_id_show
, wq_group_id_store
);
912 static ssize_t
wq_mode_show(struct device
*dev
, struct device_attribute
*attr
,
915 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
917 return sprintf(buf
, "%s\n",
918 wq_dedicated(wq
) ? "dedicated" : "shared");
921 static ssize_t
wq_mode_store(struct device
*dev
,
922 struct device_attribute
*attr
, const char *buf
,
925 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
926 struct idxd_device
*idxd
= wq
->idxd
;
928 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
931 if (wq
->state
!= IDXD_WQ_DISABLED
)
934 if (sysfs_streq(buf
, "dedicated")) {
935 set_bit(WQ_FLAG_DEDICATED
, &wq
->flags
);
937 } else if (sysfs_streq(buf
, "shared") && device_swq_supported(idxd
)) {
938 clear_bit(WQ_FLAG_DEDICATED
, &wq
->flags
);
946 static struct device_attribute dev_attr_wq_mode
=
947 __ATTR(mode
, 0644, wq_mode_show
, wq_mode_store
);
949 static ssize_t
wq_size_show(struct device
*dev
, struct device_attribute
*attr
,
952 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
954 return sprintf(buf
, "%u\n", wq
->size
);
957 static int total_claimed_wq_size(struct idxd_device
*idxd
)
962 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
963 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
971 static ssize_t
wq_size_store(struct device
*dev
,
972 struct device_attribute
*attr
, const char *buf
,
975 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
977 struct idxd_device
*idxd
= wq
->idxd
;
980 rc
= kstrtoul(buf
, 10, &size
);
984 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
987 if (idxd
->state
== IDXD_DEV_ENABLED
)
990 if (size
+ total_claimed_wq_size(idxd
) - wq
->size
> idxd
->max_wq_size
)
997 static struct device_attribute dev_attr_wq_size
=
998 __ATTR(size
, 0644, wq_size_show
, wq_size_store
);
1000 static ssize_t
wq_priority_show(struct device
*dev
,
1001 struct device_attribute
*attr
, char *buf
)
1003 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1005 return sprintf(buf
, "%u\n", wq
->priority
);
1008 static ssize_t
wq_priority_store(struct device
*dev
,
1009 struct device_attribute
*attr
,
1010 const char *buf
, size_t count
)
1012 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1014 struct idxd_device
*idxd
= wq
->idxd
;
1017 rc
= kstrtoul(buf
, 10, &prio
);
1021 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
1024 if (wq
->state
!= IDXD_WQ_DISABLED
)
1027 if (prio
> IDXD_MAX_PRIORITY
)
1030 wq
->priority
= prio
;
1034 static struct device_attribute dev_attr_wq_priority
=
1035 __ATTR(priority
, 0644, wq_priority_show
, wq_priority_store
);
1037 static ssize_t
wq_block_on_fault_show(struct device
*dev
,
1038 struct device_attribute
*attr
, char *buf
)
1040 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1042 return sprintf(buf
, "%u\n",
1043 test_bit(WQ_FLAG_BLOCK_ON_FAULT
, &wq
->flags
));
1046 static ssize_t
wq_block_on_fault_store(struct device
*dev
,
1047 struct device_attribute
*attr
,
1048 const char *buf
, size_t count
)
1050 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1051 struct idxd_device
*idxd
= wq
->idxd
;
1055 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
1058 if (wq
->state
!= IDXD_WQ_DISABLED
)
1061 rc
= kstrtobool(buf
, &bof
);
1066 set_bit(WQ_FLAG_BLOCK_ON_FAULT
, &wq
->flags
);
1068 clear_bit(WQ_FLAG_BLOCK_ON_FAULT
, &wq
->flags
);
1073 static struct device_attribute dev_attr_wq_block_on_fault
=
1074 __ATTR(block_on_fault
, 0644, wq_block_on_fault_show
,
1075 wq_block_on_fault_store
);
1077 static ssize_t
wq_threshold_show(struct device
*dev
,
1078 struct device_attribute
*attr
, char *buf
)
1080 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1082 return sprintf(buf
, "%u\n", wq
->threshold
);
1085 static ssize_t
wq_threshold_store(struct device
*dev
,
1086 struct device_attribute
*attr
,
1087 const char *buf
, size_t count
)
1089 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1090 struct idxd_device
*idxd
= wq
->idxd
;
1094 rc
= kstrtouint(buf
, 0, &val
);
1098 if (val
> wq
->size
|| val
<= 0)
1101 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
1104 if (wq
->state
!= IDXD_WQ_DISABLED
)
1107 if (test_bit(WQ_FLAG_DEDICATED
, &wq
->flags
))
1110 wq
->threshold
= val
;
1115 static struct device_attribute dev_attr_wq_threshold
=
1116 __ATTR(threshold
, 0644, wq_threshold_show
, wq_threshold_store
);
1118 static ssize_t
wq_type_show(struct device
*dev
,
1119 struct device_attribute
*attr
, char *buf
)
1121 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1124 case IDXD_WQT_KERNEL
:
1125 return sprintf(buf
, "%s\n",
1126 idxd_wq_type_names
[IDXD_WQT_KERNEL
]);
1128 return sprintf(buf
, "%s\n",
1129 idxd_wq_type_names
[IDXD_WQT_USER
]);
1132 return sprintf(buf
, "%s\n",
1133 idxd_wq_type_names
[IDXD_WQT_NONE
]);
1139 static ssize_t
wq_type_store(struct device
*dev
,
1140 struct device_attribute
*attr
, const char *buf
,
1143 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1144 enum idxd_wq_type old_type
;
1146 if (wq
->state
!= IDXD_WQ_DISABLED
)
1149 old_type
= wq
->type
;
1150 if (sysfs_streq(buf
, idxd_wq_type_names
[IDXD_WQT_NONE
]))
1151 wq
->type
= IDXD_WQT_NONE
;
1152 else if (sysfs_streq(buf
, idxd_wq_type_names
[IDXD_WQT_KERNEL
]))
1153 wq
->type
= IDXD_WQT_KERNEL
;
1154 else if (sysfs_streq(buf
, idxd_wq_type_names
[IDXD_WQT_USER
]))
1155 wq
->type
= IDXD_WQT_USER
;
1159 /* If we are changing queue type, clear the name */
1160 if (wq
->type
!= old_type
)
1161 memset(wq
->name
, 0, WQ_NAME_SIZE
+ 1);
1166 static struct device_attribute dev_attr_wq_type
=
1167 __ATTR(type
, 0644, wq_type_show
, wq_type_store
);
1169 static ssize_t
wq_name_show(struct device
*dev
,
1170 struct device_attribute
*attr
, char *buf
)
1172 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1174 return sprintf(buf
, "%s\n", wq
->name
);
1177 static ssize_t
wq_name_store(struct device
*dev
,
1178 struct device_attribute
*attr
, const char *buf
,
1181 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1183 if (wq
->state
!= IDXD_WQ_DISABLED
)
1186 if (strlen(buf
) > WQ_NAME_SIZE
|| strlen(buf
) == 0)
1190 * This is temporarily placed here until we have SVM support for
1193 if (wq
->type
== IDXD_WQT_KERNEL
&& device_pasid_enabled(wq
->idxd
))
1196 memset(wq
->name
, 0, WQ_NAME_SIZE
+ 1);
1197 strncpy(wq
->name
, buf
, WQ_NAME_SIZE
);
1198 strreplace(wq
->name
, '\n', '\0');
1202 static struct device_attribute dev_attr_wq_name
=
1203 __ATTR(name
, 0644, wq_name_show
, wq_name_store
);
1205 static ssize_t
wq_cdev_minor_show(struct device
*dev
,
1206 struct device_attribute
*attr
, char *buf
)
1208 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1210 return sprintf(buf
, "%d\n", wq
->idxd_cdev
.minor
);
1213 static struct device_attribute dev_attr_wq_cdev_minor
=
1214 __ATTR(cdev_minor
, 0444, wq_cdev_minor_show
, NULL
);
1216 static int __get_sysfs_u64(const char *buf
, u64
*val
)
1220 rc
= kstrtou64(buf
, 0, val
);
1227 *val
= roundup_pow_of_two(*val
);
1231 static ssize_t
wq_max_transfer_size_show(struct device
*dev
, struct device_attribute
*attr
,
1234 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1236 return sprintf(buf
, "%llu\n", wq
->max_xfer_bytes
);
1239 static ssize_t
wq_max_transfer_size_store(struct device
*dev
, struct device_attribute
*attr
,
1240 const char *buf
, size_t count
)
1242 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1243 struct idxd_device
*idxd
= wq
->idxd
;
1247 if (wq
->state
!= IDXD_WQ_DISABLED
)
1250 rc
= __get_sysfs_u64(buf
, &xfer_size
);
1254 if (xfer_size
> idxd
->max_xfer_bytes
)
1257 wq
->max_xfer_bytes
= xfer_size
;
1262 static struct device_attribute dev_attr_wq_max_transfer_size
=
1263 __ATTR(max_transfer_size
, 0644,
1264 wq_max_transfer_size_show
, wq_max_transfer_size_store
);
1266 static ssize_t
wq_max_batch_size_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1268 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1270 return sprintf(buf
, "%u\n", wq
->max_batch_size
);
1273 static ssize_t
wq_max_batch_size_store(struct device
*dev
, struct device_attribute
*attr
,
1274 const char *buf
, size_t count
)
1276 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1277 struct idxd_device
*idxd
= wq
->idxd
;
1281 if (wq
->state
!= IDXD_WQ_DISABLED
)
1284 rc
= __get_sysfs_u64(buf
, &batch_size
);
1288 if (batch_size
> idxd
->max_batch_size
)
1291 wq
->max_batch_size
= (u32
)batch_size
;
1296 static struct device_attribute dev_attr_wq_max_batch_size
=
1297 __ATTR(max_batch_size
, 0644, wq_max_batch_size_show
, wq_max_batch_size_store
);
1299 static ssize_t
wq_ats_disable_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1301 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1303 return sprintf(buf
, "%u\n", wq
->ats_dis
);
1306 static ssize_t
wq_ats_disable_store(struct device
*dev
, struct device_attribute
*attr
,
1307 const char *buf
, size_t count
)
1309 struct idxd_wq
*wq
= container_of(dev
, struct idxd_wq
, conf_dev
);
1310 struct idxd_device
*idxd
= wq
->idxd
;
1314 if (wq
->state
!= IDXD_WQ_DISABLED
)
1317 if (!idxd
->hw
.wq_cap
.wq_ats_support
)
1320 rc
= kstrtobool(buf
, &ats_dis
);
1324 wq
->ats_dis
= ats_dis
;
1329 static struct device_attribute dev_attr_wq_ats_disable
=
1330 __ATTR(ats_disable
, 0644, wq_ats_disable_show
, wq_ats_disable_store
);
1332 static struct attribute
*idxd_wq_attributes
[] = {
1333 &dev_attr_wq_clients
.attr
,
1334 &dev_attr_wq_state
.attr
,
1335 &dev_attr_wq_group_id
.attr
,
1336 &dev_attr_wq_mode
.attr
,
1337 &dev_attr_wq_size
.attr
,
1338 &dev_attr_wq_priority
.attr
,
1339 &dev_attr_wq_block_on_fault
.attr
,
1340 &dev_attr_wq_threshold
.attr
,
1341 &dev_attr_wq_type
.attr
,
1342 &dev_attr_wq_name
.attr
,
1343 &dev_attr_wq_cdev_minor
.attr
,
1344 &dev_attr_wq_max_transfer_size
.attr
,
1345 &dev_attr_wq_max_batch_size
.attr
,
1346 &dev_attr_wq_ats_disable
.attr
,
1350 static const struct attribute_group idxd_wq_attribute_group
= {
1351 .attrs
= idxd_wq_attributes
,
1354 static const struct attribute_group
*idxd_wq_attribute_groups
[] = {
1355 &idxd_wq_attribute_group
,
1359 /* IDXD device attribs */
1360 static ssize_t
version_show(struct device
*dev
, struct device_attribute
*attr
,
1363 struct idxd_device
*idxd
=
1364 container_of(dev
, struct idxd_device
, conf_dev
);
1366 return sprintf(buf
, "%#x\n", idxd
->hw
.version
);
1368 static DEVICE_ATTR_RO(version
);
1370 static ssize_t
max_work_queues_size_show(struct device
*dev
,
1371 struct device_attribute
*attr
,
1374 struct idxd_device
*idxd
=
1375 container_of(dev
, struct idxd_device
, conf_dev
);
1377 return sprintf(buf
, "%u\n", idxd
->max_wq_size
);
1379 static DEVICE_ATTR_RO(max_work_queues_size
);
1381 static ssize_t
max_groups_show(struct device
*dev
,
1382 struct device_attribute
*attr
, char *buf
)
1384 struct idxd_device
*idxd
=
1385 container_of(dev
, struct idxd_device
, conf_dev
);
1387 return sprintf(buf
, "%u\n", idxd
->max_groups
);
1389 static DEVICE_ATTR_RO(max_groups
);
1391 static ssize_t
max_work_queues_show(struct device
*dev
,
1392 struct device_attribute
*attr
, char *buf
)
1394 struct idxd_device
*idxd
=
1395 container_of(dev
, struct idxd_device
, conf_dev
);
1397 return sprintf(buf
, "%u\n", idxd
->max_wqs
);
1399 static DEVICE_ATTR_RO(max_work_queues
);
1401 static ssize_t
max_engines_show(struct device
*dev
,
1402 struct device_attribute
*attr
, char *buf
)
1404 struct idxd_device
*idxd
=
1405 container_of(dev
, struct idxd_device
, conf_dev
);
1407 return sprintf(buf
, "%u\n", idxd
->max_engines
);
1409 static DEVICE_ATTR_RO(max_engines
);
1411 static ssize_t
numa_node_show(struct device
*dev
,
1412 struct device_attribute
*attr
, char *buf
)
1414 struct idxd_device
*idxd
=
1415 container_of(dev
, struct idxd_device
, conf_dev
);
1417 return sprintf(buf
, "%d\n", dev_to_node(&idxd
->pdev
->dev
));
1419 static DEVICE_ATTR_RO(numa_node
);
1421 static ssize_t
max_batch_size_show(struct device
*dev
,
1422 struct device_attribute
*attr
, char *buf
)
1424 struct idxd_device
*idxd
=
1425 container_of(dev
, struct idxd_device
, conf_dev
);
1427 return sprintf(buf
, "%u\n", idxd
->max_batch_size
);
1429 static DEVICE_ATTR_RO(max_batch_size
);
1431 static ssize_t
max_transfer_size_show(struct device
*dev
,
1432 struct device_attribute
*attr
,
1435 struct idxd_device
*idxd
=
1436 container_of(dev
, struct idxd_device
, conf_dev
);
1438 return sprintf(buf
, "%llu\n", idxd
->max_xfer_bytes
);
1440 static DEVICE_ATTR_RO(max_transfer_size
);
1442 static ssize_t
op_cap_show(struct device
*dev
,
1443 struct device_attribute
*attr
, char *buf
)
1445 struct idxd_device
*idxd
=
1446 container_of(dev
, struct idxd_device
, conf_dev
);
1449 for (i
= 0; i
< 4; i
++)
1450 rc
+= sysfs_emit_at(buf
, rc
, "%#llx ", idxd
->hw
.opcap
.bits
[i
]);
1453 rc
+= sysfs_emit_at(buf
, rc
, "\n");
1456 static DEVICE_ATTR_RO(op_cap
);
1458 static ssize_t
gen_cap_show(struct device
*dev
,
1459 struct device_attribute
*attr
, char *buf
)
1461 struct idxd_device
*idxd
=
1462 container_of(dev
, struct idxd_device
, conf_dev
);
1464 return sprintf(buf
, "%#llx\n", idxd
->hw
.gen_cap
.bits
);
1466 static DEVICE_ATTR_RO(gen_cap
);
1468 static ssize_t
configurable_show(struct device
*dev
,
1469 struct device_attribute
*attr
, char *buf
)
1471 struct idxd_device
*idxd
=
1472 container_of(dev
, struct idxd_device
, conf_dev
);
1474 return sprintf(buf
, "%u\n",
1475 test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
));
1477 static DEVICE_ATTR_RO(configurable
);
1479 static ssize_t
clients_show(struct device
*dev
,
1480 struct device_attribute
*attr
, char *buf
)
1482 struct idxd_device
*idxd
=
1483 container_of(dev
, struct idxd_device
, conf_dev
);
1484 unsigned long flags
;
1487 spin_lock_irqsave(&idxd
->dev_lock
, flags
);
1488 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
1489 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
1491 count
+= wq
->client_count
;
1493 spin_unlock_irqrestore(&idxd
->dev_lock
, flags
);
1495 return sprintf(buf
, "%d\n", count
);
1497 static DEVICE_ATTR_RO(clients
);
1499 static ssize_t
pasid_enabled_show(struct device
*dev
,
1500 struct device_attribute
*attr
, char *buf
)
1502 struct idxd_device
*idxd
=
1503 container_of(dev
, struct idxd_device
, conf_dev
);
1505 return sprintf(buf
, "%u\n", device_pasid_enabled(idxd
));
1507 static DEVICE_ATTR_RO(pasid_enabled
);
1509 static ssize_t
state_show(struct device
*dev
,
1510 struct device_attribute
*attr
, char *buf
)
1512 struct idxd_device
*idxd
=
1513 container_of(dev
, struct idxd_device
, conf_dev
);
1515 switch (idxd
->state
) {
1516 case IDXD_DEV_DISABLED
:
1517 case IDXD_DEV_CONF_READY
:
1518 return sprintf(buf
, "disabled\n");
1519 case IDXD_DEV_ENABLED
:
1520 return sprintf(buf
, "enabled\n");
1521 case IDXD_DEV_HALTED
:
1522 return sprintf(buf
, "halted\n");
1525 return sprintf(buf
, "unknown\n");
1527 static DEVICE_ATTR_RO(state
);
1529 static ssize_t
errors_show(struct device
*dev
,
1530 struct device_attribute
*attr
, char *buf
)
1532 struct idxd_device
*idxd
=
1533 container_of(dev
, struct idxd_device
, conf_dev
);
1535 unsigned long flags
;
1537 spin_lock_irqsave(&idxd
->dev_lock
, flags
);
1538 for (i
= 0; i
< 4; i
++)
1539 out
+= sprintf(buf
+ out
, "%#018llx ", idxd
->sw_err
.bits
[i
]);
1540 spin_unlock_irqrestore(&idxd
->dev_lock
, flags
);
1542 out
+= sprintf(buf
+ out
, "\n");
1545 static DEVICE_ATTR_RO(errors
);
1547 static ssize_t
max_tokens_show(struct device
*dev
,
1548 struct device_attribute
*attr
, char *buf
)
1550 struct idxd_device
*idxd
=
1551 container_of(dev
, struct idxd_device
, conf_dev
);
1553 return sprintf(buf
, "%u\n", idxd
->max_tokens
);
1555 static DEVICE_ATTR_RO(max_tokens
);
1557 static ssize_t
token_limit_show(struct device
*dev
,
1558 struct device_attribute
*attr
, char *buf
)
1560 struct idxd_device
*idxd
=
1561 container_of(dev
, struct idxd_device
, conf_dev
);
1563 return sprintf(buf
, "%u\n", idxd
->token_limit
);
1566 static ssize_t
token_limit_store(struct device
*dev
,
1567 struct device_attribute
*attr
,
1568 const char *buf
, size_t count
)
1570 struct idxd_device
*idxd
=
1571 container_of(dev
, struct idxd_device
, conf_dev
);
1575 rc
= kstrtoul(buf
, 10, &val
);
1579 if (idxd
->state
== IDXD_DEV_ENABLED
)
1582 if (!test_bit(IDXD_FLAG_CONFIGURABLE
, &idxd
->flags
))
1585 if (!idxd
->hw
.group_cap
.token_limit
)
1588 if (val
> idxd
->hw
.group_cap
.total_tokens
)
1591 idxd
->token_limit
= val
;
1594 static DEVICE_ATTR_RW(token_limit
);
1596 static ssize_t
cdev_major_show(struct device
*dev
,
1597 struct device_attribute
*attr
, char *buf
)
1599 struct idxd_device
*idxd
=
1600 container_of(dev
, struct idxd_device
, conf_dev
);
1602 return sprintf(buf
, "%u\n", idxd
->major
);
1604 static DEVICE_ATTR_RO(cdev_major
);
1606 static ssize_t
cmd_status_show(struct device
*dev
,
1607 struct device_attribute
*attr
, char *buf
)
1609 struct idxd_device
*idxd
= container_of(dev
, struct idxd_device
, conf_dev
);
1611 return sprintf(buf
, "%#x\n", idxd
->cmd_status
);
1613 static DEVICE_ATTR_RO(cmd_status
);
1615 static struct attribute
*idxd_device_attributes
[] = {
1616 &dev_attr_version
.attr
,
1617 &dev_attr_max_groups
.attr
,
1618 &dev_attr_max_work_queues
.attr
,
1619 &dev_attr_max_work_queues_size
.attr
,
1620 &dev_attr_max_engines
.attr
,
1621 &dev_attr_numa_node
.attr
,
1622 &dev_attr_max_batch_size
.attr
,
1623 &dev_attr_max_transfer_size
.attr
,
1624 &dev_attr_op_cap
.attr
,
1625 &dev_attr_gen_cap
.attr
,
1626 &dev_attr_configurable
.attr
,
1627 &dev_attr_clients
.attr
,
1628 &dev_attr_pasid_enabled
.attr
,
1629 &dev_attr_state
.attr
,
1630 &dev_attr_errors
.attr
,
1631 &dev_attr_max_tokens
.attr
,
1632 &dev_attr_token_limit
.attr
,
1633 &dev_attr_cdev_major
.attr
,
1634 &dev_attr_cmd_status
.attr
,
1638 static const struct attribute_group idxd_device_attribute_group
= {
1639 .attrs
= idxd_device_attributes
,
1642 static const struct attribute_group
*idxd_attribute_groups
[] = {
1643 &idxd_device_attribute_group
,
1647 static int idxd_setup_engine_sysfs(struct idxd_device
*idxd
)
1649 struct device
*dev
= &idxd
->pdev
->dev
;
1652 for (i
= 0; i
< idxd
->max_engines
; i
++) {
1653 struct idxd_engine
*engine
= &idxd
->engines
[i
];
1655 engine
->conf_dev
.parent
= &idxd
->conf_dev
;
1656 dev_set_name(&engine
->conf_dev
, "engine%d.%d",
1657 idxd
->id
, engine
->id
);
1658 engine
->conf_dev
.bus
= idxd_get_bus_type(idxd
);
1659 engine
->conf_dev
.groups
= idxd_engine_attribute_groups
;
1660 engine
->conf_dev
.type
= &idxd_engine_device_type
;
1661 dev_dbg(dev
, "Engine device register: %s\n",
1662 dev_name(&engine
->conf_dev
));
1663 rc
= device_register(&engine
->conf_dev
);
1665 put_device(&engine
->conf_dev
);
1674 struct idxd_engine
*engine
= &idxd
->engines
[i
];
1676 device_unregister(&engine
->conf_dev
);
1681 static int idxd_setup_group_sysfs(struct idxd_device
*idxd
)
1683 struct device
*dev
= &idxd
->pdev
->dev
;
1686 for (i
= 0; i
< idxd
->max_groups
; i
++) {
1687 struct idxd_group
*group
= &idxd
->groups
[i
];
1689 group
->conf_dev
.parent
= &idxd
->conf_dev
;
1690 dev_set_name(&group
->conf_dev
, "group%d.%d",
1691 idxd
->id
, group
->id
);
1692 group
->conf_dev
.bus
= idxd_get_bus_type(idxd
);
1693 group
->conf_dev
.groups
= idxd_group_attribute_groups
;
1694 group
->conf_dev
.type
= &idxd_group_device_type
;
1695 dev_dbg(dev
, "Group device register: %s\n",
1696 dev_name(&group
->conf_dev
));
1697 rc
= device_register(&group
->conf_dev
);
1699 put_device(&group
->conf_dev
);
1708 struct idxd_group
*group
= &idxd
->groups
[i
];
1710 device_unregister(&group
->conf_dev
);
1715 static int idxd_setup_wq_sysfs(struct idxd_device
*idxd
)
1717 struct device
*dev
= &idxd
->pdev
->dev
;
1720 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
1721 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
1723 wq
->conf_dev
.parent
= &idxd
->conf_dev
;
1724 dev_set_name(&wq
->conf_dev
, "wq%d.%d", idxd
->id
, wq
->id
);
1725 wq
->conf_dev
.bus
= idxd_get_bus_type(idxd
);
1726 wq
->conf_dev
.groups
= idxd_wq_attribute_groups
;
1727 wq
->conf_dev
.type
= &idxd_wq_device_type
;
1728 dev_dbg(dev
, "WQ device register: %s\n",
1729 dev_name(&wq
->conf_dev
));
1730 rc
= device_register(&wq
->conf_dev
);
1732 put_device(&wq
->conf_dev
);
1741 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
1743 device_unregister(&wq
->conf_dev
);
1748 static int idxd_setup_device_sysfs(struct idxd_device
*idxd
)
1750 struct device
*dev
= &idxd
->pdev
->dev
;
1752 char devname
[IDXD_NAME_SIZE
];
1754 sprintf(devname
, "%s%d", idxd_get_dev_name(idxd
), idxd
->id
);
1755 idxd
->conf_dev
.parent
= dev
;
1756 dev_set_name(&idxd
->conf_dev
, "%s", devname
);
1757 idxd
->conf_dev
.bus
= idxd_get_bus_type(idxd
);
1758 idxd
->conf_dev
.groups
= idxd_attribute_groups
;
1759 idxd
->conf_dev
.type
= idxd_get_device_type(idxd
);
1761 dev_dbg(dev
, "IDXD device register: %s\n", dev_name(&idxd
->conf_dev
));
1762 rc
= device_register(&idxd
->conf_dev
);
1764 put_device(&idxd
->conf_dev
);
1771 int idxd_setup_sysfs(struct idxd_device
*idxd
)
1773 struct device
*dev
= &idxd
->pdev
->dev
;
1776 rc
= idxd_setup_device_sysfs(idxd
);
1778 dev_dbg(dev
, "Device sysfs registering failed: %d\n", rc
);
1782 rc
= idxd_setup_wq_sysfs(idxd
);
1784 /* unregister conf dev */
1785 dev_dbg(dev
, "Work Queue sysfs registering failed: %d\n", rc
);
1789 rc
= idxd_setup_group_sysfs(idxd
);
1791 /* unregister conf dev */
1792 dev_dbg(dev
, "Group sysfs registering failed: %d\n", rc
);
1796 rc
= idxd_setup_engine_sysfs(idxd
);
1798 /* unregister conf dev */
1799 dev_dbg(dev
, "Engine sysfs registering failed: %d\n", rc
);
1806 void idxd_cleanup_sysfs(struct idxd_device
*idxd
)
1810 for (i
= 0; i
< idxd
->max_wqs
; i
++) {
1811 struct idxd_wq
*wq
= &idxd
->wqs
[i
];
1813 device_unregister(&wq
->conf_dev
);
1816 for (i
= 0; i
< idxd
->max_engines
; i
++) {
1817 struct idxd_engine
*engine
= &idxd
->engines
[i
];
1819 device_unregister(&engine
->conf_dev
);
1822 for (i
= 0; i
< idxd
->max_groups
; i
++) {
1823 struct idxd_group
*group
= &idxd
->groups
[i
];
1825 device_unregister(&group
->conf_dev
);
1828 device_unregister(&idxd
->conf_dev
);
1831 int idxd_register_bus_type(void)
1835 for (i
= 0; i
< IDXD_TYPE_MAX
; i
++) {
1836 rc
= bus_register(idxd_bus_types
[i
]);
1845 bus_unregister(idxd_bus_types
[i
]);
1849 void idxd_unregister_bus_type(void)
1853 for (i
= 0; i
< IDXD_TYPE_MAX
; i
++)
1854 bus_unregister(idxd_bus_types
[i
]);