// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

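/*
 * Driver-model match callback for the dsa bus: only devices that are ready
 * to be configured may bind. An idxd device must have reached
 * IDXD_DEV_CONF_READY, and a wq must additionally be in the disabled state.
 */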
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

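/*
 * Validate and enable a wq: the parent device must be enabled, and the wq
 * must be disabled, attached to a group, and named. A shared wq also needs
 * PASID support and a nonzero threshold. On success the wq portal is mapped
 * and the wq is exposed as a dmaengine channel or a cdev based on its type.
 */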
static int enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Enabling while device not enabled.\n");
		return -EPERM;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d already enabled.\n", wq->id);
		return -EBUSY;
	}

	if (!wq->group) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ not attached to group.\n");
		return -EINVAL;
	}

	if (strlen(wq->name) == 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ name not set.\n");
		return -EINVAL;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!device_swq_supported(idxd)) {
			dev_warn(dev, "PASID not enabled and shared WQ.\n");
			mutex_unlock(&wq->wq_lock);
			return -ENXIO;
		}
		/*
		 * Shared wq with the threshold set to 0 means the user
		 * did not set the threshold or transitioned from a
		 * dedicated wq but did not set threshold. A value
		 * of 0 would effectively disable the shared wq. The
		 * driver does not allow a value of 0 to be set for
		 * threshold via sysfs.
		 */
		if (wq->threshold == 0) {
			dev_warn(dev, "Shared WQ and threshold 0.\n");
			mutex_unlock(&wq->wq_lock);
			return -EINVAL;
		}
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ resource alloc failed\n");
		return rc;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
		return rc;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
		return rc;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		dev_warn(dev, "wq portal mapping failed: %d\n", rc);
		rc = idxd_wq_disable(wq, false);
		if (rc < 0)
			dev_warn(dev, "IDXD wq disable failed\n");
		mutex_unlock(&wq->wq_lock);
		return rc;
	}

	wq->client_count = 0;

	if (wq->type == IDXD_WQT_KERNEL) {
		rc = idxd_wq_init_percpu_ref(wq);
		if (rc < 0) {
			dev_dbg(dev, "percpu_ref setup failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	}

	if (is_idxd_wq_dmaengine(wq)) {
		rc = idxd_register_dma_channel(wq);
		if (rc < 0) {
			dev_dbg(dev, "DMA channel register failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	} else if (is_idxd_wq_cdev(wq)) {
		rc = idxd_wq_add_cdev(wq);
		if (rc < 0) {
			dev_dbg(dev, "Cdev creation failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	}

	mutex_unlock(&wq->wq_lock);
	dev_info(dev, "wq %s enabled\n", dev_name(wq_confdev(wq)));

	return 0;
}

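/*
 * Driver-model probe callback: for an idxd device, write the configuration
 * (when the device is configurable), enable it, and register it with
 * dmaengine; a wq device is handed off to enable_wq(). The module reference
 * taken here is held while the device stays enabled and dropped on failure.
 */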
static int idxd_config_bus_probe(struct device *dev)
{
	int rc = 0;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		return enable_wq(wq);
	}

	return -ENODEV;
}

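/*
 * Tear down an enabled wq: quiesce a kernel-type wq, unregister its
 * dmaengine channel or cdev, then unmap the portal, drain and reset the wq,
 * and free its resources. Remaining client references only produce a
 * warning.
 */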
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(wq_confdev(wq)));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (wq->type == IDXD_WQT_KERNEL)
		idxd_wq_quiesce(wq);

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(wq_confdev(wq)));
}

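/*
 * Driver-model remove callback: a wq device is simply disabled. For an idxd
 * device, release the drivers of any still-active wqs first, then unregister
 * the dmaengine device and disable (and, if configurable, reset) the device.
 */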
static int idxd_config_bus_remove(struct device *dev)
{
	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(idxd_confdev(idxd)));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(wq_confdev(wq)));
			device_release_driver(wq_confdev(wq));
		}

		idxd_unregister_dma_device(idxd);
		idxd_device_disable(idxd);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			idxd_device_reset(idxd);
		module_put(THIS_MODULE);

		dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct idxd_device_driver dsa_drv = {
	.name = "dsa",
};

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	return idxd_driver_register(&dsa_drv);
}

void idxd_unregister_driver(void)
{
	idxd_driver_unregister(&dsa_drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

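/* Recompute the free token count: max_tokens minus all group reservations. */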
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

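/* Total wq size currently claimed by all wqs on this device. */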
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

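/*
 * Parse a u64 from a sysfs buffer, rejecting 0 and rounding the value up to
 * the next power of two.
 */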
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

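/*
 * Add the engine conf_devs. On failure, conf_devs that were never added only
 * have their reference dropped with put_device(), while already-added ones
 * are torn down with device_unregister(). The group and wq registration
 * helpers below follow the same pattern.
 */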
static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	j = i - 1;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

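/*
 * Register the device conf_dev and all wq, engine, and group conf_devs with
 * the driver core, unwinding in reverse order if any stage fails.
 */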
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}