git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/dma/idxd/sysfs.c
dmaengine: idxd: add driver register helper
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/device.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <uapi/linux/idxd.h>
10 #include "registers.h"
11 #include "idxd.h"
12
13 static char *idxd_wq_type_names[] = {
14 [IDXD_WQT_NONE] = "none",
15 [IDXD_WQT_KERNEL] = "kernel",
16 [IDXD_WQT_USER] = "user",
17 };
18
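/*
 * Driver matching on the dsa bus: an idxd device matches only in the
 * IDXD_DEV_CONF_READY state; a wq matches only if its device is at
 * least configured and the wq itself is still disabled.
 */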
19 static int idxd_config_bus_match(struct device *dev,
20 struct device_driver *drv)
21 {
22 int matched = 0;
23
24 if (is_idxd_dev(dev)) {
25 struct idxd_device *idxd = confdev_to_idxd(dev);
26
27 if (idxd->state != IDXD_DEV_CONF_READY)
28 return 0;
29 matched = 1;
30 } else if (is_idxd_wq_dev(dev)) {
31 struct idxd_wq *wq = confdev_to_wq(dev);
32 struct idxd_device *idxd = wq->idxd;
33
34 if (idxd->state < IDXD_DEV_CONF_READY)
35 return 0;
36
37 if (wq->state != IDXD_WQ_DISABLED) {
38 dev_dbg(dev, "%s not disabled\n", dev_name(dev));
39 return 0;
40 }
41 matched = 1;
42 }
43
44 if (matched)
45 dev_dbg(dev, "%s matched\n", dev_name(dev));
46
47 return matched;
48 }
49
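/*
 * Enable a wq on behalf of idxd_config_bus_probe(): validate device and
 * wq state (group and name set, shared-wq threshold non-zero), allocate
 * resources, commit the device configuration when the device is
 * user-configurable, enable the wq, map its portal, and finally register
 * either a dmaengine channel or a cdev depending on the wq type.
 */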
50 static int enable_wq(struct idxd_wq *wq)
51 {
52 struct idxd_device *idxd = wq->idxd;
53 struct device *dev = &idxd->pdev->dev;
54 unsigned long flags;
55 int rc;
56
57 mutex_lock(&wq->wq_lock);
58
59 if (idxd->state != IDXD_DEV_ENABLED) {
60 mutex_unlock(&wq->wq_lock);
61 dev_warn(dev, "Enabling while device not enabled.\n");
62 return -EPERM;
63 }
64
65 if (wq->state != IDXD_WQ_DISABLED) {
66 mutex_unlock(&wq->wq_lock);
67 dev_warn(dev, "WQ %d already enabled.\n", wq->id);
68 return -EBUSY;
69 }
70
71 if (!wq->group) {
72 mutex_unlock(&wq->wq_lock);
73 dev_warn(dev, "WQ not attached to group.\n");
74 return -EINVAL;
75 }
76
77 if (strlen(wq->name) == 0) {
78 mutex_unlock(&wq->wq_lock);
79 dev_warn(dev, "WQ name not set.\n");
80 return -EINVAL;
81 }
82
83 /* Shared WQ checks */
84 if (wq_shared(wq)) {
85 if (!device_swq_supported(idxd)) {
86 dev_warn(dev, "PASID not enabled and shared WQ.\n");
87 mutex_unlock(&wq->wq_lock);
88 return -ENXIO;
89 }
90 /*
91 * Shared wq with the threshold set to 0 means the user
92 * did not set the threshold or transitioned from a
93 * dedicated wq but did not set the threshold. A value
94 * of 0 would effectively disable the shared wq. The
95 * driver does not allow a value of 0 to be set for
96 * threshold via sysfs.
97 */
98 if (wq->threshold == 0) {
99 dev_warn(dev, "Shared WQ and threshold 0.\n");
100 mutex_unlock(&wq->wq_lock);
101 return -EINVAL;
102 }
103 }
104
105 rc = idxd_wq_alloc_resources(wq);
106 if (rc < 0) {
107 mutex_unlock(&wq->wq_lock);
108 dev_warn(dev, "WQ resource alloc failed\n");
109 return rc;
110 }
111
112 spin_lock_irqsave(&idxd->dev_lock, flags);
113 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
114 rc = idxd_device_config(idxd);
115 spin_unlock_irqrestore(&idxd->dev_lock, flags);
116 if (rc < 0) {
117 mutex_unlock(&wq->wq_lock);
118 dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
119 return rc;
120 }
121
122 rc = idxd_wq_enable(wq);
123 if (rc < 0) {
124 mutex_unlock(&wq->wq_lock);
125 dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
126 return rc;
127 }
128
129 rc = idxd_wq_map_portal(wq);
130 if (rc < 0) {
131 dev_warn(dev, "wq portal mapping failed: %d\n", rc);
132 /* don't let a successful disable mask the mapping error */
133 if (idxd_wq_disable(wq, false) < 0)
134 dev_warn(dev, "IDXD wq disable failed\n");
135 mutex_unlock(&wq->wq_lock);
136 return rc;
137 }
138
139 wq->client_count = 0;
140
141 if (wq->type == IDXD_WQT_KERNEL) {
142 rc = idxd_wq_init_percpu_ref(wq);
143 if (rc < 0) {
144 dev_dbg(dev, "percpu_ref setup failed\n");
145 mutex_unlock(&wq->wq_lock);
146 return rc;
147 }
148 }
149
150 if (is_idxd_wq_dmaengine(wq)) {
151 rc = idxd_register_dma_channel(wq);
152 if (rc < 0) {
153 dev_dbg(dev, "DMA channel register failed\n");
154 mutex_unlock(&wq->wq_lock);
155 return rc;
156 }
157 } else if (is_idxd_wq_cdev(wq)) {
158 rc = idxd_wq_add_cdev(wq);
159 if (rc < 0) {
160 dev_dbg(dev, "Cdev creation failed\n");
161 mutex_unlock(&wq->wq_lock);
162 return rc;
163 }
164 }
165
166 mutex_unlock(&wq->wq_lock);
167 dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));
168
169 return 0;
170 }
171
172 static int idxd_config_bus_probe(struct device *dev)
173 {
174 int rc = 0;
175 unsigned long flags;
176
177 dev_dbg(dev, "%s called\n", __func__);
178
179 if (is_idxd_dev(dev)) {
180 struct idxd_device *idxd = confdev_to_idxd(dev);
181
182 if (idxd->state != IDXD_DEV_CONF_READY) {
183 dev_warn(dev, "Device not ready for config\n");
184 return -EBUSY;
185 }
186
187 if (!try_module_get(THIS_MODULE))
188 return -ENXIO;
189
190 /* Perform IDXD configuration and enabling */
191 spin_lock_irqsave(&idxd->dev_lock, flags);
192 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
193 rc = idxd_device_config(idxd);
194 spin_unlock_irqrestore(&idxd->dev_lock, flags);
195 if (rc < 0) {
196 module_put(THIS_MODULE);
197 dev_warn(dev, "Device config failed: %d\n", rc);
198 return rc;
199 }
200
201 /* start device */
202 rc = idxd_device_enable(idxd);
203 if (rc < 0) {
204 module_put(THIS_MODULE);
205 dev_warn(dev, "Device enable failed: %d\n", rc);
206 return rc;
207 }
208
209 dev_info(dev, "Device %s enabled\n", dev_name(dev));
210
211 rc = idxd_register_dma_device(idxd);
212 if (rc < 0) {
213 module_put(THIS_MODULE);
214 dev_dbg(dev, "Failed to register dmaengine device\n");
215 return rc;
216 }
217 return 0;
218 } else if (is_idxd_wq_dev(dev)) {
219 struct idxd_wq *wq = confdev_to_wq(dev);
220
221 return enable_wq(wq);
222 }
223
224 return -ENODEV;
225 }
226
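/*
 * Tear-down counterpart of enable_wq(): quiesce kernel wqs, unregister
 * the dmaengine channel or cdev, then unmap the portal and drain and
 * reset the wq before freeing its resources.
 */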
227 static void disable_wq(struct idxd_wq *wq)
228 {
229 struct idxd_device *idxd = wq->idxd;
230 struct device *dev = &idxd->pdev->dev;
231
232 mutex_lock(&wq->wq_lock);
233 dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
234 if (wq->state == IDXD_WQ_DISABLED) {
235 mutex_unlock(&wq->wq_lock);
236 return;
237 }
238
239 if (wq->type == IDXD_WQT_KERNEL)
240 idxd_wq_quiesce(wq);
241
242 if (is_idxd_wq_dmaengine(wq))
243 idxd_unregister_dma_channel(wq);
244 else if (is_idxd_wq_cdev(wq))
245 idxd_wq_del_cdev(wq);
246
247 if (idxd_wq_refcount(wq))
248 dev_warn(dev, "Clients has claim on wq %d: %d\n",
249 wq->id, idxd_wq_refcount(wq));
250
251 idxd_wq_unmap_portal(wq);
252
253 idxd_wq_drain(wq);
254 idxd_wq_reset(wq);
255
256 idxd_wq_free_resources(wq);
257 wq->client_count = 0;
258 mutex_unlock(&wq->wq_lock);
259
260 dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
261 }
262
263 static int idxd_config_bus_remove(struct device *dev)
264 {
265 dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));
266
267 /* disable workqueue here */
268 if (is_idxd_wq_dev(dev)) {
269 struct idxd_wq *wq = confdev_to_wq(dev);
270
271 disable_wq(wq);
272 } else if (is_idxd_dev(dev)) {
273 struct idxd_device *idxd = confdev_to_idxd(dev);
274 int i;
275
276 dev_dbg(dev, "%s removing dev %s\n", __func__,
277 dev_name(&idxd->conf_dev));
278 for (i = 0; i < idxd->max_wqs; i++) {
279 struct idxd_wq *wq = idxd->wqs[i];
280
281 if (wq->state == IDXD_WQ_DISABLED)
282 continue;
283 dev_warn(dev, "Active wq %d on disable %s.\n", i,
284 dev_name(&idxd->conf_dev));
285 device_release_driver(&wq->conf_dev);
286 }
287
288 idxd_unregister_dma_device(idxd);
289 idxd_device_disable(idxd);
290 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
291 idxd_device_reset(idxd);
292 module_put(THIS_MODULE);
293
294 dev_info(dev, "Device %s disabled\n", dev_name(dev));
295 }
296
297 return 0;
298 }
299
300 static void idxd_config_bus_shutdown(struct device *dev)
301 {
302 dev_dbg(dev, "%s called\n", __func__);
303 }
304
305 struct bus_type dsa_bus_type = {
306 .name = "dsa",
307 .match = idxd_config_bus_match,
308 .probe = idxd_config_bus_probe,
309 .remove = idxd_config_bus_remove,
310 .shutdown = idxd_config_bus_shutdown,
311 };
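/*
 * A minimal usage sketch of this bus from user space (hypothetical
 * instance names; actual enumeration depends on the platform):
 *
 *   echo 0         > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *   echo 16        > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *   echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *   echo user      > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *   echo myapp     > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *   echo dsa0      > /sys/bus/dsa/drivers/dsa/bind
 *   echo wq0.0     > /sys/bus/dsa/drivers/dsa/bind
 *
 * Each write to the driver's bind file goes through
 * idxd_config_bus_match() and, on success, idxd_config_bus_probe();
 * writes to unbind end up in idxd_config_bus_remove().
 */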
312
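/*
 * dsa_drv is the single catch-all driver on the dsa bus: both device and
 * wq conf_devs bind to it. It is registered through the
 * idxd_driver_register() helper below.
 */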
313 static struct idxd_device_driver dsa_drv = {
314 .drv = {
315 .name = "dsa",
316 },
317 };
318
319 /* IDXD generic driver setup */
320 int idxd_register_driver(void)
321 {
322 return idxd_driver_register(&dsa_drv);
323 }
324
325 void idxd_unregister_driver(void)
326 {
327 idxd_driver_unregister(&dsa_drv);
328 }
329
330 /* IDXD engine attributes */
331 static ssize_t engine_group_id_show(struct device *dev,
332 struct device_attribute *attr, char *buf)
333 {
334 struct idxd_engine *engine =
335 container_of(dev, struct idxd_engine, conf_dev);
336
337 if (engine->group)
338 return sysfs_emit(buf, "%d\n", engine->group->id);
339 else
340 return sysfs_emit(buf, "%d\n", -1);
341 }
342
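/*
 * Writing -1 detaches the engine from its current group; any other valid
 * group id moves it, with num_engines kept in sync on both groups.
 */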
343 static ssize_t engine_group_id_store(struct device *dev,
344 struct device_attribute *attr,
345 const char *buf, size_t count)
346 {
347 struct idxd_engine *engine =
348 container_of(dev, struct idxd_engine, conf_dev);
349 struct idxd_device *idxd = engine->idxd;
350 long id;
351 int rc;
352 struct idxd_group *prevg;
353
354 rc = kstrtol(buf, 10, &id);
355 if (rc < 0)
356 return -EINVAL;
357
358 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
359 return -EPERM;
360
361 if (id > idxd->max_groups - 1 || id < -1)
362 return -EINVAL;
363
364 if (id == -1) {
365 if (engine->group) {
366 engine->group->num_engines--;
367 engine->group = NULL;
368 }
369 return count;
370 }
371
372 prevg = engine->group;
373
374 if (prevg)
375 prevg->num_engines--;
376 engine->group = idxd->groups[id];
377 engine->group->num_engines++;
378
379 return count;
380 }
381
382 static struct device_attribute dev_attr_engine_group =
383 __ATTR(group_id, 0644, engine_group_id_show,
384 engine_group_id_store);
385
386 static struct attribute *idxd_engine_attributes[] = {
387 &dev_attr_engine_group.attr,
388 NULL,
389 };
390
391 static const struct attribute_group idxd_engine_attribute_group = {
392 .attrs = idxd_engine_attributes,
393 };
394
395 static const struct attribute_group *idxd_engine_attribute_groups[] = {
396 &idxd_engine_attribute_group,
397 NULL,
398 };
399
400 static void idxd_conf_engine_release(struct device *dev)
401 {
402 struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
403
404 kfree(engine);
405 }
406
407 struct device_type idxd_engine_device_type = {
408 .name = "engine",
409 .release = idxd_conf_engine_release,
410 .groups = idxd_engine_attribute_groups,
411 };
412
413 /* Group attributes */
414
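/*
 * Tokens are the device's shared bandwidth resource: nr_tokens tracks
 * what is left after subtracting every group's tokens_reserved from
 * max_tokens, and is recomputed whenever a reservation changes.
 */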
415 static void idxd_set_free_tokens(struct idxd_device *idxd)
416 {
417 int i, tokens;
418
419 for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
420 struct idxd_group *g = idxd->groups[i];
421
422 tokens += g->tokens_reserved;
423 }
424
425 idxd->nr_tokens = idxd->max_tokens - tokens;
426 }
427
428 static ssize_t group_tokens_reserved_show(struct device *dev,
429 struct device_attribute *attr,
430 char *buf)
431 {
432 struct idxd_group *group =
433 container_of(dev, struct idxd_group, conf_dev);
434
435 return sysfs_emit(buf, "%u\n", group->tokens_reserved);
436 }
437
438 static ssize_t group_tokens_reserved_store(struct device *dev,
439 struct device_attribute *attr,
440 const char *buf, size_t count)
441 {
442 struct idxd_group *group =
443 container_of(dev, struct idxd_group, conf_dev);
444 struct idxd_device *idxd = group->idxd;
445 unsigned long val;
446 int rc;
447
448 rc = kstrtoul(buf, 10, &val);
449 if (rc < 0)
450 return -EINVAL;
451
452 if (idxd->data->type == IDXD_TYPE_IAX)
453 return -EOPNOTSUPP;
454
455 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
456 return -EPERM;
457
458 if (idxd->state == IDXD_DEV_ENABLED)
459 return -EPERM;
460
461 if (val > idxd->max_tokens)
462 return -EINVAL;
463
464 if (val > idxd->nr_tokens + group->tokens_reserved)
465 return -EINVAL;
466
467 group->tokens_reserved = val;
468 idxd_set_free_tokens(idxd);
469 return count;
470 }
471
472 static struct device_attribute dev_attr_group_tokens_reserved =
473 __ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
474 group_tokens_reserved_store);
475
476 static ssize_t group_tokens_allowed_show(struct device *dev,
477 struct device_attribute *attr,
478 char *buf)
479 {
480 struct idxd_group *group =
481 container_of(dev, struct idxd_group, conf_dev);
482
483 return sysfs_emit(buf, "%u\n", group->tokens_allowed);
484 }
485
486 static ssize_t group_tokens_allowed_store(struct device *dev,
487 struct device_attribute *attr,
488 const char *buf, size_t count)
489 {
490 struct idxd_group *group =
491 container_of(dev, struct idxd_group, conf_dev);
492 struct idxd_device *idxd = group->idxd;
493 unsigned long val;
494 int rc;
495
496 rc = kstrtoul(buf, 10, &val);
497 if (rc < 0)
498 return -EINVAL;
499
500 if (idxd->data->type == IDXD_TYPE_IAX)
501 return -EOPNOTSUPP;
502
503 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
504 return -EPERM;
505
506 if (idxd->state == IDXD_DEV_ENABLED)
507 return -EPERM;
508
509 if (val < 4 * group->num_engines ||
510 val > group->tokens_reserved + idxd->nr_tokens)
511 return -EINVAL;
512
513 group->tokens_allowed = val;
514 return count;
515 }
516
517 static struct device_attribute dev_attr_group_tokens_allowed =
518 __ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
519 group_tokens_allowed_store);
520
521 static ssize_t group_use_token_limit_show(struct device *dev,
522 struct device_attribute *attr,
523 char *buf)
524 {
525 struct idxd_group *group =
526 container_of(dev, struct idxd_group, conf_dev);
527
528 return sysfs_emit(buf, "%u\n", group->use_token_limit);
529 }
530
531 static ssize_t group_use_token_limit_store(struct device *dev,
532 struct device_attribute *attr,
533 const char *buf, size_t count)
534 {
535 struct idxd_group *group =
536 container_of(dev, struct idxd_group, conf_dev);
537 struct idxd_device *idxd = group->idxd;
538 unsigned long val;
539 int rc;
540
541 rc = kstrtoul(buf, 10, &val);
542 if (rc < 0)
543 return -EINVAL;
544
545 if (idxd->data->type == IDXD_TYPE_IAX)
546 return -EOPNOTSUPP;
547
548 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
549 return -EPERM;
550
551 if (idxd->state == IDXD_DEV_ENABLED)
552 return -EPERM;
553
554 if (idxd->token_limit == 0)
555 return -EPERM;
556
557 group->use_token_limit = !!val;
558 return count;
559 }
560
561 static struct device_attribute dev_attr_group_use_token_limit =
562 __ATTR(use_token_limit, 0644, group_use_token_limit_show,
563 group_use_token_limit_store);
564
565 static ssize_t group_engines_show(struct device *dev,
566 struct device_attribute *attr, char *buf)
567 {
568 struct idxd_group *group =
569 container_of(dev, struct idxd_group, conf_dev);
570 int i, rc = 0;
571 struct idxd_device *idxd = group->idxd;
572
573 for (i = 0; i < idxd->max_engines; i++) {
574 struct idxd_engine *engine = idxd->engines[i];
575
576 if (!engine->group)
577 continue;
578
579 if (engine->group->id == group->id)
580 rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
581 }
582
583 if (!rc)
584 return 0;
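/* back up over the trailing space so the output ends in a newline */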
585 rc--;
586 rc += sysfs_emit_at(buf, rc, "\n");
587
588 return rc;
589 }
590
591 static struct device_attribute dev_attr_group_engines =
592 __ATTR(engines, 0444, group_engines_show, NULL);
593
594 static ssize_t group_work_queues_show(struct device *dev,
595 struct device_attribute *attr, char *buf)
596 {
597 struct idxd_group *group =
598 container_of(dev, struct idxd_group, conf_dev);
599 int i, rc = 0;
600 struct idxd_device *idxd = group->idxd;
601
602 for (i = 0; i < idxd->max_wqs; i++) {
603 struct idxd_wq *wq = idxd->wqs[i];
604
605 if (!wq->group)
606 continue;
607
608 if (wq->group->id == group->id)
609 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
610 }
611
612 if (!rc)
613 return 0;
614 rc--;
615 rc += sysfs_emit_at(buf, rc, "\n");
616
617 return rc;
618 }
619
620 static struct device_attribute dev_attr_group_work_queues =
621 __ATTR(work_queues, 0444, group_work_queues_show, NULL);
622
623 static ssize_t group_traffic_class_a_show(struct device *dev,
624 struct device_attribute *attr,
625 char *buf)
626 {
627 struct idxd_group *group =
628 container_of(dev, struct idxd_group, conf_dev);
629
630 return sysfs_emit(buf, "%d\n", group->tc_a);
631 }
632
633 static ssize_t group_traffic_class_a_store(struct device *dev,
634 struct device_attribute *attr,
635 const char *buf, size_t count)
636 {
637 struct idxd_group *group =
638 container_of(dev, struct idxd_group, conf_dev);
639 struct idxd_device *idxd = group->idxd;
640 long val;
641 int rc;
642
643 rc = kstrtol(buf, 10, &val);
644 if (rc < 0)
645 return -EINVAL;
646
647 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
648 return -EPERM;
649
650 if (idxd->state == IDXD_DEV_ENABLED)
651 return -EPERM;
652
653 if (val < 0 || val > 7)
654 return -EINVAL;
655
656 group->tc_a = val;
657 return count;
658 }
659
660 static struct device_attribute dev_attr_group_traffic_class_a =
661 __ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
662 group_traffic_class_a_store);
663
664 static ssize_t group_traffic_class_b_show(struct device *dev,
665 struct device_attribute *attr,
666 char *buf)
667 {
668 struct idxd_group *group =
669 container_of(dev, struct idxd_group, conf_dev);
670
671 return sysfs_emit(buf, "%d\n", group->tc_b);
672 }
673
674 static ssize_t group_traffic_class_b_store(struct device *dev,
675 struct device_attribute *attr,
676 const char *buf, size_t count)
677 {
678 struct idxd_group *group =
679 container_of(dev, struct idxd_group, conf_dev);
680 struct idxd_device *idxd = group->idxd;
681 long val;
682 int rc;
683
684 rc = kstrtol(buf, 10, &val);
685 if (rc < 0)
686 return -EINVAL;
687
688 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
689 return -EPERM;
690
691 if (idxd->state == IDXD_DEV_ENABLED)
692 return -EPERM;
693
694 if (val < 0 || val > 7)
695 return -EINVAL;
696
697 group->tc_b = val;
698 return count;
699 }
700
701 static struct device_attribute dev_attr_group_traffic_class_b =
702 __ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
703 group_traffic_class_b_store);
704
705 static struct attribute *idxd_group_attributes[] = {
706 &dev_attr_group_work_queues.attr,
707 &dev_attr_group_engines.attr,
708 &dev_attr_group_use_token_limit.attr,
709 &dev_attr_group_tokens_allowed.attr,
710 &dev_attr_group_tokens_reserved.attr,
711 &dev_attr_group_traffic_class_a.attr,
712 &dev_attr_group_traffic_class_b.attr,
713 NULL,
714 };
715
716 static const struct attribute_group idxd_group_attribute_group = {
717 .attrs = idxd_group_attributes,
718 };
719
720 static const struct attribute_group *idxd_group_attribute_groups[] = {
721 &idxd_group_attribute_group,
722 NULL,
723 };
724
725 static void idxd_conf_group_release(struct device *dev)
726 {
727 struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
728
729 kfree(group);
730 }
731
732 struct device_type idxd_group_device_type = {
733 .name = "group",
734 .release = idxd_conf_group_release,
735 .groups = idxd_group_attribute_groups,
736 };
737
738 /* IDXD work queue attribs */
739 static ssize_t wq_clients_show(struct device *dev,
740 struct device_attribute *attr, char *buf)
741 {
742 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
743
744 return sysfs_emit(buf, "%d\n", wq->client_count);
745 }
746
747 static struct device_attribute dev_attr_wq_clients =
748 __ATTR(clients, 0444, wq_clients_show, NULL);
749
750 static ssize_t wq_state_show(struct device *dev,
751 struct device_attribute *attr, char *buf)
752 {
753 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
754
755 switch (wq->state) {
756 case IDXD_WQ_DISABLED:
757 return sysfs_emit(buf, "disabled\n");
758 case IDXD_WQ_ENABLED:
759 return sysfs_emit(buf, "enabled\n");
760 }
761
762 return sysfs_emit(buf, "unknown\n");
763 }
764
765 static struct device_attribute dev_attr_wq_state =
766 __ATTR(state, 0444, wq_state_show, NULL);
767
768 static ssize_t wq_group_id_show(struct device *dev,
769 struct device_attribute *attr, char *buf)
770 {
771 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
772
773 if (wq->group)
774 return sysfs_emit(buf, "%u\n", wq->group->id);
775 else
776 return sysfs_emit(buf, "-1\n");
777 }
778
779 static ssize_t wq_group_id_store(struct device *dev,
780 struct device_attribute *attr,
781 const char *buf, size_t count)
782 {
783 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
784 struct idxd_device *idxd = wq->idxd;
785 long id;
786 int rc;
787 struct idxd_group *prevg, *group;
788
789 rc = kstrtol(buf, 10, &id);
790 if (rc < 0)
791 return -EINVAL;
792
793 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
794 return -EPERM;
795
796 if (wq->state != IDXD_WQ_DISABLED)
797 return -EPERM;
798
799 if (id > idxd->max_groups - 1 || id < -1)
800 return -EINVAL;
801
802 if (id == -1) {
803 if (wq->group) {
804 wq->group->num_wqs--;
805 wq->group = NULL;
806 }
807 return count;
808 }
809
810 group = idxd->groups[id];
811 prevg = wq->group;
812
813 if (prevg)
814 prevg->num_wqs--;
815 wq->group = group;
816 group->num_wqs++;
817 return count;
818 }
819
820 static struct device_attribute dev_attr_wq_group_id =
821 __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
822
823 static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
824 char *buf)
825 {
826 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
827
828 return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
829 }
830
831 static ssize_t wq_mode_store(struct device *dev,
832 struct device_attribute *attr, const char *buf,
833 size_t count)
834 {
835 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
836 struct idxd_device *idxd = wq->idxd;
837
838 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
839 return -EPERM;
840
841 if (wq->state != IDXD_WQ_DISABLED)
842 return -EPERM;
843
844 if (sysfs_streq(buf, "dedicated")) {
845 set_bit(WQ_FLAG_DEDICATED, &wq->flags);
846 wq->threshold = 0;
847 } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
848 clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
849 } else {
850 return -EINVAL;
851 }
852
853 return count;
854 }
855
856 static struct device_attribute dev_attr_wq_mode =
857 __ATTR(mode, 0644, wq_mode_show, wq_mode_store);
858
859 static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
860 char *buf)
861 {
862 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
863
864 return sysfs_emit(buf, "%u\n", wq->size);
865 }
866
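/*
 * Sum of all configured wq sizes; wq_size_store() subtracts the current
 * wq's own size back out before checking the new total against
 * max_wq_size.
 */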
867 static int total_claimed_wq_size(struct idxd_device *idxd)
868 {
869 int i;
870 int wq_size = 0;
871
872 for (i = 0; i < idxd->max_wqs; i++) {
873 struct idxd_wq *wq = idxd->wqs[i];
874
875 wq_size += wq->size;
876 }
877
878 return wq_size;
879 }
880
881 static ssize_t wq_size_store(struct device *dev,
882 struct device_attribute *attr, const char *buf,
883 size_t count)
884 {
885 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
886 unsigned long size;
887 struct idxd_device *idxd = wq->idxd;
888 int rc;
889
890 rc = kstrtoul(buf, 10, &size);
891 if (rc < 0)
892 return -EINVAL;
893
894 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
895 return -EPERM;
896
897 if (idxd->state == IDXD_DEV_ENABLED)
898 return -EPERM;
899
900 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
901 return -EINVAL;
902
903 wq->size = size;
904 return count;
905 }
906
907 static struct device_attribute dev_attr_wq_size =
908 __ATTR(size, 0644, wq_size_show, wq_size_store);
909
910 static ssize_t wq_priority_show(struct device *dev,
911 struct device_attribute *attr, char *buf)
912 {
913 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
914
915 return sysfs_emit(buf, "%u\n", wq->priority);
916 }
917
918 static ssize_t wq_priority_store(struct device *dev,
919 struct device_attribute *attr,
920 const char *buf, size_t count)
921 {
922 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
923 unsigned long prio;
924 struct idxd_device *idxd = wq->idxd;
925 int rc;
926
927 rc = kstrtoul(buf, 10, &prio);
928 if (rc < 0)
929 return -EINVAL;
930
931 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
932 return -EPERM;
933
934 if (wq->state != IDXD_WQ_DISABLED)
935 return -EPERM;
936
937 if (prio > IDXD_MAX_PRIORITY)
938 return -EINVAL;
939
940 wq->priority = prio;
941 return count;
942 }
943
944 static struct device_attribute dev_attr_wq_priority =
945 __ATTR(priority, 0644, wq_priority_show, wq_priority_store);
946
947 static ssize_t wq_block_on_fault_show(struct device *dev,
948 struct device_attribute *attr, char *buf)
949 {
950 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
951
952 return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
953 }
954
955 static ssize_t wq_block_on_fault_store(struct device *dev,
956 struct device_attribute *attr,
957 const char *buf, size_t count)
958 {
959 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
960 struct idxd_device *idxd = wq->idxd;
961 bool bof;
962 int rc;
963
964 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
965 return -EPERM;
966
967 if (wq->state != IDXD_WQ_DISABLED)
968 return -ENXIO;
969
970 rc = kstrtobool(buf, &bof);
971 if (rc < 0)
972 return rc;
973
974 if (bof)
975 set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
976 else
977 clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
978
979 return count;
980 }
981
982 static struct device_attribute dev_attr_wq_block_on_fault =
983 __ATTR(block_on_fault, 0644, wq_block_on_fault_show,
984 wq_block_on_fault_store);
985
986 static ssize_t wq_threshold_show(struct device *dev,
987 struct device_attribute *attr, char *buf)
988 {
989 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
990
991 return sysfs_emit(buf, "%u\n", wq->threshold);
992 }
993
994 static ssize_t wq_threshold_store(struct device *dev,
995 struct device_attribute *attr,
996 const char *buf, size_t count)
997 {
998 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
999 struct idxd_device *idxd = wq->idxd;
1000 unsigned int val;
1001 int rc;
1002
1003 rc = kstrtouint(buf, 0, &val);
1004 if (rc < 0)
1005 return -EINVAL;
1006
1007 if (val > wq->size || val <= 0)
1008 return -EINVAL;
1009
1010 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1011 return -EPERM;
1012
1013 if (wq->state != IDXD_WQ_DISABLED)
1014 return -ENXIO;
1015
1016 if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
1017 return -EINVAL;
1018
1019 wq->threshold = val;
1020
1021 return count;
1022 }
1023
1024 static struct device_attribute dev_attr_wq_threshold =
1025 __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
1026
1027 static ssize_t wq_type_show(struct device *dev,
1028 struct device_attribute *attr, char *buf)
1029 {
1030 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1031
1032 switch (wq->type) {
1033 case IDXD_WQT_KERNEL:
1034 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
1035 case IDXD_WQT_USER:
1036 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
1037 case IDXD_WQT_NONE:
1038 default:
1039 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
1040 }
1041
1042 return -EINVAL;
1043 }
1044
1045 static ssize_t wq_type_store(struct device *dev,
1046 struct device_attribute *attr, const char *buf,
1047 size_t count)
1048 {
1049 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1050 enum idxd_wq_type old_type;
1051
1052 if (wq->state != IDXD_WQ_DISABLED)
1053 return -EPERM;
1054
1055 old_type = wq->type;
1056 if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
1057 wq->type = IDXD_WQT_NONE;
1058 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
1059 wq->type = IDXD_WQT_KERNEL;
1060 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
1061 wq->type = IDXD_WQT_USER;
1062 else
1063 return -EINVAL;
1064
1065 /* If we are changing queue type, clear the name */
1066 if (wq->type != old_type)
1067 memset(wq->name, 0, WQ_NAME_SIZE + 1);
1068
1069 return count;
1070 }
1071
1072 static struct device_attribute dev_attr_wq_type =
1073 __ATTR(type, 0644, wq_type_show, wq_type_store);
1074
1075 static ssize_t wq_name_show(struct device *dev,
1076 struct device_attribute *attr, char *buf)
1077 {
1078 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1079
1080 return sysfs_emit(buf, "%s\n", wq->name);
1081 }
1082
1083 static ssize_t wq_name_store(struct device *dev,
1084 struct device_attribute *attr, const char *buf,
1085 size_t count)
1086 {
1087 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1088
1089 if (wq->state != IDXD_WQ_DISABLED)
1090 return -EPERM;
1091
1092 if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
1093 return -EINVAL;
1094
1095 /*
1096 * This is temporarily placed here until we have SVM support for
1097 * dmaengine.
1098 */
1099 if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
1100 return -EOPNOTSUPP;
1101
1102 memset(wq->name, 0, WQ_NAME_SIZE + 1);
1103 strncpy(wq->name, buf, WQ_NAME_SIZE);
1104 strreplace(wq->name, '\n', '\0');
1105 return count;
1106 }
1107
1108 static struct device_attribute dev_attr_wq_name =
1109 __ATTR(name, 0644, wq_name_show, wq_name_store);
1110
1111 static ssize_t wq_cdev_minor_show(struct device *dev,
1112 struct device_attribute *attr, char *buf)
1113 {
1114 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1115 int minor = -1;
1116
1117 mutex_lock(&wq->wq_lock);
1118 if (wq->idxd_cdev)
1119 minor = wq->idxd_cdev->minor;
1120 mutex_unlock(&wq->wq_lock);
1121
1122 if (minor == -1)
1123 return -ENXIO;
1124 return sysfs_emit(buf, "%d\n", minor);
1125 }
1126
1127 static struct device_attribute dev_attr_wq_cdev_minor =
1128 __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
1129
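/*
 * Parse a u64 from sysfs input, rejecting zero and rounding up to the
 * next power of two, as required by the max_transfer_size and
 * max_batch_size stores below.
 */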
1130 static int __get_sysfs_u64(const char *buf, u64 *val)
1131 {
1132 int rc;
1133
1134 rc = kstrtou64(buf, 0, val);
1135 if (rc < 0)
1136 return -EINVAL;
1137
1138 if (*val == 0)
1139 return -EINVAL;
1140
1141 *val = roundup_pow_of_two(*val);
1142 return 0;
1143 }
1144
1145 static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
1146 char *buf)
1147 {
1148 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1149
1150 return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
1151 }
1152
1153 static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
1154 const char *buf, size_t count)
1155 {
1156 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1157 struct idxd_device *idxd = wq->idxd;
1158 u64 xfer_size;
1159 int rc;
1160
1161 if (wq->state != IDXD_WQ_DISABLED)
1162 return -EPERM;
1163
1164 rc = __get_sysfs_u64(buf, &xfer_size);
1165 if (rc < 0)
1166 return rc;
1167
1168 if (xfer_size > idxd->max_xfer_bytes)
1169 return -EINVAL;
1170
1171 wq->max_xfer_bytes = xfer_size;
1172
1173 return count;
1174 }
1175
1176 static struct device_attribute dev_attr_wq_max_transfer_size =
1177 __ATTR(max_transfer_size, 0644,
1178 wq_max_transfer_size_show, wq_max_transfer_size_store);
1179
1180 static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
1181 {
1182 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1183
1184 return sysfs_emit(buf, "%u\n", wq->max_batch_size);
1185 }
1186
1187 static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
1188 const char *buf, size_t count)
1189 {
1190 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1191 struct idxd_device *idxd = wq->idxd;
1192 u64 batch_size;
1193 int rc;
1194
1195 if (wq->state != IDXD_WQ_DISABLED)
1196 return -EPERM;
1197
1198 rc = __get_sysfs_u64(buf, &batch_size);
1199 if (rc < 0)
1200 return rc;
1201
1202 if (batch_size > idxd->max_batch_size)
1203 return -EINVAL;
1204
1205 wq->max_batch_size = (u32)batch_size;
1206
1207 return count;
1208 }
1209
1210 static struct device_attribute dev_attr_wq_max_batch_size =
1211 __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
1212
1213 static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
1214 {
1215 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1216
1217 return sysfs_emit(buf, "%u\n", wq->ats_dis);
1218 }
1219
1220 static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
1221 const char *buf, size_t count)
1222 {
1223 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1224 struct idxd_device *idxd = wq->idxd;
1225 bool ats_dis;
1226 int rc;
1227
1228 if (wq->state != IDXD_WQ_DISABLED)
1229 return -EPERM;
1230
1231 if (!idxd->hw.wq_cap.wq_ats_support)
1232 return -EOPNOTSUPP;
1233
1234 rc = kstrtobool(buf, &ats_dis);
1235 if (rc < 0)
1236 return rc;
1237
1238 wq->ats_dis = ats_dis;
1239
1240 return count;
1241 }
1242
1243 static struct device_attribute dev_attr_wq_ats_disable =
1244 __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
1245
1246 static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
1247 {
1248 struct idxd_wq *wq = confdev_to_wq(dev);
1249 struct idxd_device *idxd = wq->idxd;
1250 u32 occup, offset;
1251
1252 if (!idxd->hw.wq_cap.occupancy)
1253 return -EOPNOTSUPP;
1254
1255 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
1256 occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;
1257
1258 return sysfs_emit(buf, "%u\n", occup);
1259 }
1260
1261 static struct device_attribute dev_attr_wq_occupancy =
1262 __ATTR(occupancy, 0444, wq_occupancy_show, NULL);
1263
1264 static struct attribute *idxd_wq_attributes[] = {
1265 &dev_attr_wq_clients.attr,
1266 &dev_attr_wq_state.attr,
1267 &dev_attr_wq_group_id.attr,
1268 &dev_attr_wq_mode.attr,
1269 &dev_attr_wq_size.attr,
1270 &dev_attr_wq_priority.attr,
1271 &dev_attr_wq_block_on_fault.attr,
1272 &dev_attr_wq_threshold.attr,
1273 &dev_attr_wq_type.attr,
1274 &dev_attr_wq_name.attr,
1275 &dev_attr_wq_cdev_minor.attr,
1276 &dev_attr_wq_max_transfer_size.attr,
1277 &dev_attr_wq_max_batch_size.attr,
1278 &dev_attr_wq_ats_disable.attr,
1279 &dev_attr_wq_occupancy.attr,
1280 NULL,
1281 };
1282
1283 static const struct attribute_group idxd_wq_attribute_group = {
1284 .attrs = idxd_wq_attributes,
1285 };
1286
1287 static const struct attribute_group *idxd_wq_attribute_groups[] = {
1288 &idxd_wq_attribute_group,
1289 NULL,
1290 };
1291
1292 static void idxd_conf_wq_release(struct device *dev)
1293 {
1294 struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
1295
1296 kfree(wq->wqcfg);
1297 kfree(wq);
1298 }
1299
1300 struct device_type idxd_wq_device_type = {
1301 .name = "wq",
1302 .release = idxd_conf_wq_release,
1303 .groups = idxd_wq_attribute_groups,
1304 };
1305
1306 /* IDXD device attribs */
1307 static ssize_t version_show(struct device *dev, struct device_attribute *attr,
1308 char *buf)
1309 {
1310 struct idxd_device *idxd =
1311 container_of(dev, struct idxd_device, conf_dev);
1312
1313 return sysfs_emit(buf, "%#x\n", idxd->hw.version);
1314 }
1315 static DEVICE_ATTR_RO(version);
1316
1317 static ssize_t max_work_queues_size_show(struct device *dev,
1318 struct device_attribute *attr,
1319 char *buf)
1320 {
1321 struct idxd_device *idxd =
1322 container_of(dev, struct idxd_device, conf_dev);
1323
1324 return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
1325 }
1326 static DEVICE_ATTR_RO(max_work_queues_size);
1327
1328 static ssize_t max_groups_show(struct device *dev,
1329 struct device_attribute *attr, char *buf)
1330 {
1331 struct idxd_device *idxd =
1332 container_of(dev, struct idxd_device, conf_dev);
1333
1334 return sysfs_emit(buf, "%u\n", idxd->max_groups);
1335 }
1336 static DEVICE_ATTR_RO(max_groups);
1337
1338 static ssize_t max_work_queues_show(struct device *dev,
1339 struct device_attribute *attr, char *buf)
1340 {
1341 struct idxd_device *idxd =
1342 container_of(dev, struct idxd_device, conf_dev);
1343
1344 return sysfs_emit(buf, "%u\n", idxd->max_wqs);
1345 }
1346 static DEVICE_ATTR_RO(max_work_queues);
1347
1348 static ssize_t max_engines_show(struct device *dev,
1349 struct device_attribute *attr, char *buf)
1350 {
1351 struct idxd_device *idxd =
1352 container_of(dev, struct idxd_device, conf_dev);
1353
1354 return sysfs_emit(buf, "%u\n", idxd->max_engines);
1355 }
1356 static DEVICE_ATTR_RO(max_engines);
1357
1358 static ssize_t numa_node_show(struct device *dev,
1359 struct device_attribute *attr, char *buf)
1360 {
1361 struct idxd_device *idxd =
1362 container_of(dev, struct idxd_device, conf_dev);
1363
1364 return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
1365 }
1366 static DEVICE_ATTR_RO(numa_node);
1367
1368 static ssize_t max_batch_size_show(struct device *dev,
1369 struct device_attribute *attr, char *buf)
1370 {
1371 struct idxd_device *idxd =
1372 container_of(dev, struct idxd_device, conf_dev);
1373
1374 return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
1375 }
1376 static DEVICE_ATTR_RO(max_batch_size);
1377
1378 static ssize_t max_transfer_size_show(struct device *dev,
1379 struct device_attribute *attr,
1380 char *buf)
1381 {
1382 struct idxd_device *idxd =
1383 container_of(dev, struct idxd_device, conf_dev);
1384
1385 return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
1386 }
1387 static DEVICE_ATTR_RO(max_transfer_size);
1388
1389 static ssize_t op_cap_show(struct device *dev,
1390 struct device_attribute *attr, char *buf)
1391 {
1392 struct idxd_device *idxd =
1393 container_of(dev, struct idxd_device, conf_dev);
1394 int i, rc = 0;
1395
1396 for (i = 0; i < 4; i++)
1397 rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
1398
1399 rc--;
1400 rc += sysfs_emit_at(buf, rc, "\n");
1401 return rc;
1402 }
1403 static DEVICE_ATTR_RO(op_cap);
1404
1405 static ssize_t gen_cap_show(struct device *dev,
1406 struct device_attribute *attr, char *buf)
1407 {
1408 struct idxd_device *idxd =
1409 container_of(dev, struct idxd_device, conf_dev);
1410
1411 return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
1412 }
1413 static DEVICE_ATTR_RO(gen_cap);
1414
1415 static ssize_t configurable_show(struct device *dev,
1416 struct device_attribute *attr, char *buf)
1417 {
1418 struct idxd_device *idxd =
1419 container_of(dev, struct idxd_device, conf_dev);
1420
1421 return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
1422 }
1423 static DEVICE_ATTR_RO(configurable);
1424
1425 static ssize_t clients_show(struct device *dev,
1426 struct device_attribute *attr, char *buf)
1427 {
1428 struct idxd_device *idxd =
1429 container_of(dev, struct idxd_device, conf_dev);
1430 unsigned long flags;
1431 int count = 0, i;
1432
1433 spin_lock_irqsave(&idxd->dev_lock, flags);
1434 for (i = 0; i < idxd->max_wqs; i++) {
1435 struct idxd_wq *wq = idxd->wqs[i];
1436
1437 count += wq->client_count;
1438 }
1439 spin_unlock_irqrestore(&idxd->dev_lock, flags);
1440
1441 return sysfs_emit(buf, "%d\n", count);
1442 }
1443 static DEVICE_ATTR_RO(clients);
1444
1445 static ssize_t pasid_enabled_show(struct device *dev,
1446 struct device_attribute *attr, char *buf)
1447 {
1448 struct idxd_device *idxd =
1449 container_of(dev, struct idxd_device, conf_dev);
1450
1451 return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
1452 }
1453 static DEVICE_ATTR_RO(pasid_enabled);
1454
1455 static ssize_t state_show(struct device *dev,
1456 struct device_attribute *attr, char *buf)
1457 {
1458 struct idxd_device *idxd =
1459 container_of(dev, struct idxd_device, conf_dev);
1460
1461 switch (idxd->state) {
1462 case IDXD_DEV_DISABLED:
1463 case IDXD_DEV_CONF_READY:
1464 return sysfs_emit(buf, "disabled\n");
1465 case IDXD_DEV_ENABLED:
1466 return sysfs_emit(buf, "enabled\n");
1467 case IDXD_DEV_HALTED:
1468 return sysfs_emit(buf, "halted\n");
1469 }
1470
1471 return sysfs_emit(buf, "unknown\n");
1472 }
1473 static DEVICE_ATTR_RO(state);
1474
1475 static ssize_t errors_show(struct device *dev,
1476 struct device_attribute *attr, char *buf)
1477 {
1478 struct idxd_device *idxd =
1479 container_of(dev, struct idxd_device, conf_dev);
1480 int i, out = 0;
1481 unsigned long flags;
1482
1483 spin_lock_irqsave(&idxd->dev_lock, flags);
1484 for (i = 0; i < 4; i++)
1485 out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
1486 spin_unlock_irqrestore(&idxd->dev_lock, flags);
1487 out--;
1488 out += sysfs_emit_at(buf, out, "\n");
1489 return out;
1490 }
1491 static DEVICE_ATTR_RO(errors);
1492
1493 static ssize_t max_tokens_show(struct device *dev,
1494 struct device_attribute *attr, char *buf)
1495 {
1496 struct idxd_device *idxd =
1497 container_of(dev, struct idxd_device, conf_dev);
1498
1499 return sysfs_emit(buf, "%u\n", idxd->max_tokens);
1500 }
1501 static DEVICE_ATTR_RO(max_tokens);
1502
1503 static ssize_t token_limit_show(struct device *dev,
1504 struct device_attribute *attr, char *buf)
1505 {
1506 struct idxd_device *idxd =
1507 container_of(dev, struct idxd_device, conf_dev);
1508
1509 return sysfs_emit(buf, "%u\n", idxd->token_limit);
1510 }
1511
1512 static ssize_t token_limit_store(struct device *dev,
1513 struct device_attribute *attr,
1514 const char *buf, size_t count)
1515 {
1516 struct idxd_device *idxd =
1517 container_of(dev, struct idxd_device, conf_dev);
1518 unsigned long val;
1519 int rc;
1520
1521 rc = kstrtoul(buf, 10, &val);
1522 if (rc < 0)
1523 return -EINVAL;
1524
1525 if (idxd->state == IDXD_DEV_ENABLED)
1526 return -EPERM;
1527
1528 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1529 return -EPERM;
1530
1531 if (!idxd->hw.group_cap.token_limit)
1532 return -EPERM;
1533
1534 if (val > idxd->hw.group_cap.total_tokens)
1535 return -EINVAL;
1536
1537 idxd->token_limit = val;
1538 return count;
1539 }
1540 static DEVICE_ATTR_RW(token_limit);
1541
1542 static ssize_t cdev_major_show(struct device *dev,
1543 struct device_attribute *attr, char *buf)
1544 {
1545 struct idxd_device *idxd =
1546 container_of(dev, struct idxd_device, conf_dev);
1547
1548 return sysfs_emit(buf, "%u\n", idxd->major);
1549 }
1550 static DEVICE_ATTR_RO(cdev_major);
1551
1552 static ssize_t cmd_status_show(struct device *dev,
1553 struct device_attribute *attr, char *buf)
1554 {
1555 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1556
1557 return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
1558 }
1559 static DEVICE_ATTR_RO(cmd_status);
1560
1561 static struct attribute *idxd_device_attributes[] = {
1562 &dev_attr_version.attr,
1563 &dev_attr_max_groups.attr,
1564 &dev_attr_max_work_queues.attr,
1565 &dev_attr_max_work_queues_size.attr,
1566 &dev_attr_max_engines.attr,
1567 &dev_attr_numa_node.attr,
1568 &dev_attr_max_batch_size.attr,
1569 &dev_attr_max_transfer_size.attr,
1570 &dev_attr_op_cap.attr,
1571 &dev_attr_gen_cap.attr,
1572 &dev_attr_configurable.attr,
1573 &dev_attr_clients.attr,
1574 &dev_attr_pasid_enabled.attr,
1575 &dev_attr_state.attr,
1576 &dev_attr_errors.attr,
1577 &dev_attr_max_tokens.attr,
1578 &dev_attr_token_limit.attr,
1579 &dev_attr_cdev_major.attr,
1580 &dev_attr_cmd_status.attr,
1581 NULL,
1582 };
1583
1584 static const struct attribute_group idxd_device_attribute_group = {
1585 .attrs = idxd_device_attributes,
1586 };
1587
1588 static const struct attribute_group *idxd_attribute_groups[] = {
1589 &idxd_device_attribute_group,
1590 NULL,
1591 };
1592
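/*
 * Called on the final put of the device's conf_dev: everything allocated
 * for the confdev hierarchy at probe time, including the id from
 * idxd_ida, is released here.
 */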
1593 static void idxd_conf_device_release(struct device *dev)
1594 {
1595 struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
1596
1597 kfree(idxd->groups);
1598 kfree(idxd->wqs);
1599 kfree(idxd->engines);
1600 kfree(idxd->irq_entries);
1601 kfree(idxd->int_handles);
1602 ida_free(&idxd_ida, idxd->id);
1603 kfree(idxd);
1604 }
1605
1606 struct device_type dsa_device_type = {
1607 .name = "dsa",
1608 .release = idxd_conf_device_release,
1609 .groups = idxd_attribute_groups,
1610 };
1611
1612 struct device_type iax_device_type = {
1613 .name = "iax",
1614 .release = idxd_conf_device_release,
1615 .groups = idxd_attribute_groups,
1616 };
1617
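/*
 * The three registration helpers below share a cleanup pattern on
 * device_add() failure: conf_devs that were never added only get a
 * put_device() (dropping the device_initialize() reference), while the
 * ones already added are device_unregister()ed.
 */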
1618 static int idxd_register_engine_devices(struct idxd_device *idxd)
1619 {
1620 int i, j, rc;
1621
1622 for (i = 0; i < idxd->max_engines; i++) {
1623 struct idxd_engine *engine = idxd->engines[i];
1624
1625 rc = device_add(&engine->conf_dev);
1626 if (rc < 0)
1627 goto cleanup;
1628 }
1629
1630 return 0;
1631
1632 cleanup:
1633 j = i - 1;
1634 for (; i < idxd->max_engines; i++)
1635 put_device(&idxd->engines[i]->conf_dev);
1636
1637 while (j--)
1638 device_unregister(&idxd->engines[j]->conf_dev);
1639 return rc;
1640 }
1641
1642 static int idxd_register_group_devices(struct idxd_device *idxd)
1643 {
1644 int i, j, rc;
1645
1646 for (i = 0; i < idxd->max_groups; i++) {
1647 struct idxd_group *group = idxd->groups[i];
1648
1649 rc = device_add(&group->conf_dev);
1650 if (rc < 0)
1651 goto cleanup;
1652 }
1653
1654 return 0;
1655
1656 cleanup:
1657 j = i - 1;
1658 for (; i < idxd->max_groups; i++)
1659 put_device(&idxd->groups[i]->conf_dev);
1660
1661 while (j--)
1662 device_unregister(&idxd->groups[j]->conf_dev);
1663 return rc;
1664 }
1665
1666 static int idxd_register_wq_devices(struct idxd_device *idxd)
1667 {
1668 int i, rc, j;
1669
1670 for (i = 0; i < idxd->max_wqs; i++) {
1671 struct idxd_wq *wq = idxd->wqs[i];
1672
1673 rc = device_add(&wq->conf_dev);
1674 if (rc < 0)
1675 goto cleanup;
1676 }
1677
1678 return 0;
1679
1680 cleanup:
1681 j = i - 1;
1682 for (; i < idxd->max_wqs; i++)
1683 put_device(&idxd->wqs[i]->conf_dev);
1684
1685 while (j--)
1686 device_unregister(&idxd->wqs[j]->conf_dev);
1687 return rc;
1688 }
1689
1690 int idxd_register_devices(struct idxd_device *idxd)
1691 {
1692 struct device *dev = &idxd->pdev->dev;
1693 int rc, i;
1694
1695 rc = device_add(&idxd->conf_dev);
1696 if (rc < 0)
1697 return rc;
1698
1699 rc = idxd_register_wq_devices(idxd);
1700 if (rc < 0) {
1701 dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
1702 goto err_wq;
1703 }
1704
1705 rc = idxd_register_engine_devices(idxd);
1706 if (rc < 0) {
1707 dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
1708 goto err_engine;
1709 }
1710
1711 rc = idxd_register_group_devices(idxd);
1712 if (rc < 0) {
1713 dev_dbg(dev, "Group device registering failed: %d\n", rc);
1714 goto err_group;
1715 }
1716
1717 return 0;
1718
1719 err_group:
1720 for (i = 0; i < idxd->max_engines; i++)
1721 device_unregister(&idxd->engines[i]->conf_dev);
1722 err_engine:
1723 for (i = 0; i < idxd->max_wqs; i++)
1724 device_unregister(&idxd->wqs[i]->conf_dev);
1725 err_wq:
1726 device_del(&idxd->conf_dev);
1727 return rc;
1728 }
1729
1730 void idxd_unregister_devices(struct idxd_device *idxd)
1731 {
1732 int i;
1733
1734 for (i = 0; i < idxd->max_wqs; i++) {
1735 struct idxd_wq *wq = idxd->wqs[i];
1736
1737 device_unregister(&wq->conf_dev);
1738 }
1739
1740 for (i = 0; i < idxd->max_engines; i++) {
1741 struct idxd_engine *engine = idxd->engines[i];
1742
1743 device_unregister(&engine->conf_dev);
1744 }
1745
1746 for (i = 0; i < idxd->max_groups; i++) {
1747 struct idxd_group *group = idxd->groups[i];
1748
1749 device_unregister(&group->conf_dev);
1750 }
1751 }
1752
1753 int idxd_register_bus_type(void)
1754 {
1755 return bus_register(&dsa_bus_type);
1756 }
1757
1758 void idxd_unregister_bus_type(void)
1759 {
1760 bus_unregister(&dsa_bus_type);
1761 }