// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

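/*
 * sysfs.c implements the configuration interface for idxd devices: the
 * "dsa" bus type that matches, probes and removes config devices, and
 * the attribute groups exposed for devices, groups, engines and wqs
 * under /sys/bus/dsa/devices/.
 */
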
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

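/*
 * Bus match: an idxd device config dev always matches the driver; a wq
 * config dev matches only while the wq is disabled, so an enabled wq is
 * never rebound.
 */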
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

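/*
 * Probe: for an idxd device this writes the device configuration to the
 * hardware (when it is configurable), enables the device and registers
 * it with dmaengine; for a wq it defers entirely to drv_enable_wq().
 */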
static int idxd_config_bus_probe(struct device *dev)
{
	int rc = 0;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		return drv_enable_wq(wq);
	}

	return -ENODEV;
}

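/*
 * Teardown order matters: quiesce kernel wqs and remove the dmaengine
 * channel or cdev first, then unmap the portal and drain outstanding
 * descriptors before resetting the wq and freeing its resources.
 */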
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(wq_confdev(wq)));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (wq->type == IDXD_WQT_KERNEL)
		idxd_wq_quiesce(wq);

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(wq_confdev(wq)));
}

static int idxd_config_bus_remove(struct device *dev)
{
	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(idxd_confdev(idxd)));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(wq_confdev(wq)));
			device_release_driver(wq_confdev(wq));
		}

		idxd_unregister_dma_device(idxd);
		idxd_device_disable(idxd);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			idxd_device_reset(idxd);
		module_put(THIS_MODULE);

		dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

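/*
 * Userspace drives this bus through sysfs, typically via accel-config.
 * A minimal by-hand sketch (device names are illustrative):
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind	# probe: enable device
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind	# probe: enable wq
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/unbind	# remove: disable wq
 */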
static struct idxd_device_driver dsa_drv = {
	.name = "dsa",
};

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	return idxd_driver_register(&dsa_drv);
}

void idxd_unregister_driver(void)
{
	idxd_driver_unregister(&dsa_drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

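/*
 * Example (illustrative device names): attach engine0.0 to group 0, or
 * detach it from its current group by writing -1:
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *	echo -1 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 */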
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

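/*
 * Token accounting: nr_tokens is whatever max_tokens leaves over after
 * all group reservations. E.g. (illustrative numbers) with max_tokens =
 * 96 and two groups reserving 16 and 8, nr_tokens becomes 96 - 24 = 72.
 */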
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

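/*
 * The list-style *_show helpers below build a space-separated list with
 * sysfs_emit_at() and then back up one position (rc--) so that the
 * final trailing space is overwritten by the newline.
 */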
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

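/*
 * Example (illustrative path): a wq must be disabled before switching
 * between dedicated and shared mode, and shared mode is only accepted
 * when the device supports shared wqs:
 *
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 */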
static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

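/*
 * The size check in wq_size_store() counts every wq's current size but
 * subtracts this wq's own, since it is being replaced. E.g.
 * (illustrative numbers) with max_wq_size = 128, total claims of
 * 64 + 32, and wq->size = 32, the largest accepted new size is
 * 128 - 64 = 64.
 */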
static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

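/*
 * Parse a u64 from sysfs input and round it up to a power of two, so
 * e.g. writing 1000 to max_transfer_size stores 1024. Zero is rejected.
 */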
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

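/*
 * Occupancy is read live from the OCCUP field of this wq's WQCFG
 * register rather than from cached driver state, so it reflects the
 * number of descriptors currently sitting in the queue.
 */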
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

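/*
 * Error unwind for the register helpers below: entries that were never
 * (or unsuccessfully) device_add()'ed only need put_device() to drop
 * their initial reference, while successfully added entries need a full
 * device_unregister().
 */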
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* j counts the engines that were successfully added */
	j = i;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* j counts the groups that were successfully added */
	j = i;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* j counts the wqs that were successfully added */
	j = i;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}