// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

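/*
 * sysfs configuration interface for the idxd driver: everything the user
 * configures goes through the "dsa" bus type defined in this file. Each
 * idxd device, work queue, group, and engine registers a conf_dev on that
 * bus, and the attribute groups defined further down provide the
 * per-object knobs.
 */
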
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

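/*
 * idxd_config_bus_probe() below runs when a configured device or wq is
 * bound to the "dsa" driver through the standard driver-core interface.
 * An illustrative sequence (assuming a DSA device enumerated as dsa0 with
 * a configured queue wq0.0) might be:
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 *
 * For the device, probe writes the accumulated group/wq/engine
 * configuration to hardware and enables it; for a wq, probe allocates
 * resources, enables the queue, maps its portal, and registers either a
 * dmaengine channel or a user cdev depending on the wq type.
 */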
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			/* dev_lock was already dropped above; don't unlock it again */
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/*
			 * Re-take dev_lock for the disable (it was dropped
			 * above) and preserve the mapping error as the
			 * return value.
			 */
			spin_lock_irqsave(&idxd->dev_lock, flags);
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

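/*
 * Tear a wq down in roughly the reverse order of idxd_config_bus_probe():
 * unregister the dmaengine channel or cdev, unmap the portal, disable the
 * queue in hardware, then free its resources.
 */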
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

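/*
 * Bus types and drivers are indexed by idxd->type; only the DSA type is
 * wired up here, so both arrays have a single "dsa" entry.
 */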
static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* unwind only the drivers that registered successfully */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

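/*
 * The rest of this file implements the sysfs attributes exposed under
 * /sys/bus/dsa/devices/. For example (illustrative paths, assuming a
 * device dsa0 with engine engine0.0), an engine is assigned to group 0
 * with:
 *
 *	echo 0 > /sys/bus/dsa/devices/engine0.0/group_id
 *
 * and detached again by writing -1. Writes are rejected with -EPERM
 * unless the device reports IDXD_FLAG_CONFIGURABLE.
 */
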
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

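/*
 * Token accounting in one line: nr_tokens is what remains for shared use
 * once every group's reservation is subtracted from the hardware total,
 * i.e. nr_tokens = max_tokens - sum(tokens_reserved over all groups).
 * The stores below keep that invariant by rejecting any reservation that
 * would push the sum past max_tokens.
 */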
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;
	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* don't step back past buf when no engines matched */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* don't step back past buf when no wqs matched */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

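/*
 * wq_size_store() below checks the requested size against what is left of
 * the device total: the claimed sum, minus this wq's current size, plus
 * the new size must not exceed max_wq_size.
 */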
static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

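/*
 * A typical wq setup sequence through the attributes above might look
 * like this (illustrative paths, assuming device dsa0 with queue wq0.0):
 *
 *	echo 0 > /sys/bus/dsa/devices/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/wq0.0/size
 *	echo 10 > /sys/bus/dsa/devices/wq0.0/priority
 *	echo user > /sys/bus/dsa/devices/wq0.0/type
 *	echo myapp > /sys/bus/dsa/devices/wq0.0/name
 *
 * Every store is rejected with -EPERM while the wq is enabled, and most
 * also require the device to report IDXD_FLAG_CONFIGURABLE.
 */
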
/* IDXD device attribs */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

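/*
 * The setup helpers below build the conf_dev hierarchy on the dsa bus:
 * the device node (e.g. dsa0) is the parent, and each wq%d.%d, group%d.%d,
 * and engine%d.%d child hangs off it with its attribute group attached.
 */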
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

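/*
 * Note that the idxd_setup_sysfs() error paths above return without
 * unwinding the stages that already succeeded (the "unregister conf dev"
 * comments mark the missing cleanup), so a partial failure leaves the
 * earlier registrations in place. idxd_cleanup_sysfs() below unregisters
 * everything in one pass.
 */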
void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* unwind only the bus types that registered successfully */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}