]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/dma/idxd/sysfs.c
dmaengine: idxd: fix bus_probe() and bus_remove() for dsa_bus
[mirror_ubuntu-jammy-kernel.git] / drivers / dma / idxd / sysfs.c
CommitLineData
c52ca478
DJ
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3#include <linux/init.h>
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/pci.h>
7#include <linux/device.h>
8#include <linux/io-64-nonatomic-lo-hi.h>
9#include <uapi/linux/idxd.h>
10#include "registers.h"
11#include "idxd.h"
12
/* Human-readable wq type names, indexed by enum idxd_wq_type; shown via the sysfs "type" attribute. */
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};
18
c52ca478
DJ
/*
 * Bus match callback: any idxd device or wq configuration device may be
 * bound by a driver on the dsa bus.
 */
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	if (is_idxd_dev(idxd_dev))
		return 1;

	return is_idxd_wq_dev(idxd_dev);
}
c52ca478 26
fcc2281b
DJ
27static int idxd_config_bus_probe(struct device *dev)
28{
29 struct idxd_device_driver *idxd_drv =
30 container_of(dev->driver, struct idxd_device_driver, drv);
31 struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
c52ca478 32
fcc2281b
DJ
33 return idxd_drv->probe(idxd_dev);
34}
c52ca478 35
fcc2281b
DJ
36static int idxd_config_bus_remove(struct device *dev)
37{
38 struct idxd_device_driver *idxd_drv =
39 container_of(dev->driver, struct idxd_device_driver, drv);
40 struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
41
42 idxd_drv->remove(idxd_dev);
43 return 0;
c52ca478
DJ
44}
45
fcc2281b
DJ
/* The "dsa" bus that idxd configuration devices (device, wq) hang off of. */
struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
};
52
53static int idxd_dsa_drv_probe(struct idxd_dev *idxd_dev)
c52ca478 54{
fcc2281b 55 struct device *dev = &idxd_dev->conf_dev;
c52ca478 56 unsigned long flags;
fcc2281b 57 int rc;
c52ca478 58
fcc2281b
DJ
59 if (is_idxd_dev(idxd_dev)) {
60 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
c52ca478 61
fcc2281b 62 if (idxd->state != IDXD_DEV_DISABLED)
42d279f9
DJ
63 return -ENXIO;
64
fcc2281b 65 /* Device configuration */
0d5c10b4 66 spin_lock_irqsave(&idxd->dev_lock, flags);
8c66bbdc
DJ
67 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
68 rc = idxd_device_config(idxd);
0d5c10b4 69 spin_unlock_irqrestore(&idxd->dev_lock, flags);
c52ca478 70 if (rc < 0) {
fcc2281b 71 dev_dbg(dev, "Device config failed: %d\n", rc);
c52ca478
DJ
72 return rc;
73 }
74
fcc2281b 75 /* Start device */
c52ca478
DJ
76 rc = idxd_device_enable(idxd);
77 if (rc < 0) {
c52ca478
DJ
78 dev_warn(dev, "Device enable failed: %d\n", rc);
79 return rc;
80 }
81
fcc2281b 82 /* Setup DMA device without channels */
8f47d1a5
DJ
83 rc = idxd_register_dma_device(idxd);
84 if (rc < 0) {
8f47d1a5 85 dev_dbg(dev, "Failed to register dmaengine device\n");
fcc2281b 86 idxd_device_disable(idxd);
8f47d1a5
DJ
87 return rc;
88 }
fcc2281b
DJ
89
90 dev_info(dev, "Device %s enabled\n", dev_name(dev));
c52ca478 91 return 0;
fcc2281b
DJ
92 }
93
94 if (is_idxd_wq_dev(idxd_dev)) {
95 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
c52ca478 96
1f2bb403 97 return drv_enable_wq(wq);
c52ca478
DJ
98 }
99
100 return -ENODEV;
101}
102
/*
 * Remove callback of the generic "dsa" driver — undoes idxd_dsa_drv_probe().
 * For an idxd device: force-release any wq that is still enabled, tear down
 * the dmaengine registration, disable the device, and reset it when it is
 * software-configurable.  For a wq device: just disable that wq.
 */
static void idxd_dsa_drv_remove(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;

	if (is_idxd_dev(idxd_dev)) {
		struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
		int i;

		/* Release drivers on any wqs still active under this device. */
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(wq_confdev(wq)));
			device_release_driver(wq_confdev(wq));
		}

		/* Teardown order: dmaengine first, then device disable/reset. */
		idxd_unregister_dma_device(idxd);
		idxd_device_disable(idxd);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			idxd_device_reset(idxd);
		return;
	}

	if (is_idxd_wq_dev(idxd_dev)) {
		struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

		drv_disable_wq(wq);
		return;
	}
}
c52ca478 135
/* Generic catch-all "dsa" driver that can bind both device and wq conf devices. */
static struct idxd_device_driver dsa_drv = {
	.name = "dsa",
	.probe = idxd_dsa_drv_probe,
	.remove = idxd_dsa_drv_remove,
};

/* IDXD generic driver setup */

/* Register the generic "dsa" driver on the dsa bus; returns 0 or -errno. */
int idxd_register_driver(void)
{
	return idxd_driver_register(&dsa_drv);
}

/* Unregister the generic "dsa" driver. */
void idxd_unregister_driver(void)
{
	idxd_driver_unregister(&dsa_drv);
}
152
153/* IDXD engine attributes */
/* IDXD engine attributes */

/* Show the id of the group this engine belongs to, or -1 when unassigned. */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

/*
 * Assign the engine to a group (0..max_groups-1), or detach it with -1.
 * Only allowed on a software-configurable device.
 */
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	/* id == -1 detaches the engine from its current group. */
	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	/* Move engine out of its previous group (if any) into the new one. */
	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* conf_dev release callback: frees the engine object allocated at setup. */
static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};
233
c52ca478
DJ
234/* Group attributes */
235
236static void idxd_set_free_tokens(struct idxd_device *idxd)
237{
238 int i, tokens;
239
240 for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
defe49f9 241 struct idxd_group *g = idxd->groups[i];
c52ca478
DJ
242
243 tokens += g->tokens_reserved;
244 }
245
246 idxd->nr_tokens = idxd->max_tokens - tokens;
247}
248
static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}

/*
 * Set the number of tokens reserved for this group.  Rejected on IAX
 * devices, on non-configurable devices, and while the device is enabled.
 */
static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	/* Token configuration is not supported on IAX devices. */
	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	/* New reservation must fit within free tokens + our old reservation. */
	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}

/*
 * Set the maximum tokens this group may use.  Must be at least
 * 4 * num_engines (per the range check below) and no more than the
 * group's reservation plus the device's free tokens.
 */
static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	/* Token configuration is not supported on IAX devices. */
	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_token_limit);
}

/*
 * Toggle whether this group honors the device-wide token limit.
 * Requires a nonzero device token_limit to be meaningful.
 */
static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	/* Token configuration is not supported on IAX devices. */
	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

/* List the engines assigned to this group as "engine<dev>.<id> ..." */
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	/* Back up over the trailing space and replace it with a newline. */
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

/* List the wqs assigned to this group as "wq<dev>.<id> ..." */
static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	/* Back up over the trailing space and replace it with a newline. */
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

/* Set traffic class A for the group; valid range is 0..7. */
static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

/* Set traffic class B for the group; valid range is 0..7. */
static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* conf_dev release callback: frees the group object allocated at setup. */
static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};
546
c52ca478
DJ
/* IDXD work queue attribs */

/* Show the number of clients currently holding this wq open. */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

/* Show the wq state as "disabled"/"enabled" ("unknown" for anything else). */
static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

/* Show the id of the group this wq belongs to, or -1 when unassigned. */
static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

/*
 * Assign the wq to a group (0..max_groups-1), or detach it with -1.
 * Only allowed on a configurable device while the wq is disabled.
 */
static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	/* id == -1 detaches the wq from its current group. */
	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	/* Move wq out of its previous group (if any) into the new one. */
	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
631
/* Show the wq mode: "dedicated" or "shared" (from WQ_FLAG_DEDICATED). */
static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

/*
 * Set the wq mode.  "shared" is only accepted when the device supports
 * shared wqs; "dedicated" additionally clears the threshold.
 */
static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

/* Sum of the sizes currently claimed by all wqs on the device. */
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

/*
 * Set the wq size.  The new total across all wqs (replacing this wq's
 * old size) must not exceed the device's max_wq_size.
 */
static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
718
static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

/* Set the wq priority; capped at IDXD_MAX_PRIORITY, wq must be disabled. */
static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

/* Toggle WQ_FLAG_BLOCK_ON_FAULT; accepts any kstrtobool-parsable value. */
static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

/*
 * Set the shared-wq threshold: must be 1..wq->size and the wq must be a
 * shared (not dedicated) wq in the disabled state.
 * NOTE(review): val is unsigned, so "val <= 0" only matches val == 0.
 */
static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	/* Threshold is meaningless on a dedicated wq. */
	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
835
/* Show the wq type name ("none"/"kernel"/"user"). */
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

/*
 * Set the wq type by name.  Changing the type clears the wq name, since
 * the name is namespaced per type.  Requires the wq to be disabled.
 */
static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

/*
 * Set the wq name (1..WQ_NAME_SIZE chars).  The buffer is WQ_NAME_SIZE+1
 * bytes and zeroed first, so the strncpy result is always terminated;
 * a trailing newline from sysfs input is stripped.
 */
static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);
919
/*
 * Show the char-device minor number for this wq, or -ENXIO when no cdev
 * exists.  wq_lock guards against the cdev being torn down concurrently.
 */
static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
938
e7184b15
DJ
939static int __get_sysfs_u64(const char *buf, u64 *val)
940{
941 int rc;
942
943 rc = kstrtou64(buf, 0, val);
944 if (rc < 0)
945 return -EINVAL;
946
947 if (*val == 0)
948 return -EINVAL;
949
950 *val = roundup_pow_of_two(*val);
951 return 0;
952}
953
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

/*
 * Set the wq's max transfer size.  Input is rounded up to a power of two
 * by __get_sysfs_u64() and must not exceed the device capability.
 */
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

/*
 * Set the wq's max batch size.  Input is rounded up to a power of two
 * by __get_sysfs_u64() and must not exceed the device capability.
 */
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

/*
 * Toggle ATS disable for the wq.  Requires hardware wq ATS support and
 * the wq to be disabled.
 */
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
1054
/*
 * Show the wq's current occupancy read from the WQCFG occupancy register.
 * Only available when the hardware advertises the occupancy capability.
 */
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);
1072
/* All per-wq sysfs attributes exposed under the wq conf device. */
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* conf_dev release callback: frees the wq config block and the wq itself. */
static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};
1114
c52ca478 1115/* IDXD device attribs */
c2ce6bbc
DJ
1116static ssize_t version_show(struct device *dev, struct device_attribute *attr,
1117 char *buf)
1118{
700af3a0 1119 struct idxd_device *idxd = confdev_to_idxd(dev);
c2ce6bbc 1120
8241571f 1121 return sysfs_emit(buf, "%#x\n", idxd->hw.version);
c2ce6bbc
DJ
1122}
1123static DEVICE_ATTR_RO(version);
1124
c52ca478
DJ
1125static ssize_t max_work_queues_size_show(struct device *dev,
1126 struct device_attribute *attr,
1127 char *buf)
1128{
700af3a0 1129 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1130
8241571f 1131 return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
c52ca478
DJ
1132}
1133static DEVICE_ATTR_RO(max_work_queues_size);
1134
1135static ssize_t max_groups_show(struct device *dev,
1136 struct device_attribute *attr, char *buf)
1137{
700af3a0 1138 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1139
8241571f 1140 return sysfs_emit(buf, "%u\n", idxd->max_groups);
c52ca478
DJ
1141}
1142static DEVICE_ATTR_RO(max_groups);
1143
1144static ssize_t max_work_queues_show(struct device *dev,
1145 struct device_attribute *attr, char *buf)
1146{
700af3a0 1147 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1148
8241571f 1149 return sysfs_emit(buf, "%u\n", idxd->max_wqs);
c52ca478
DJ
1150}
1151static DEVICE_ATTR_RO(max_work_queues);
1152
1153static ssize_t max_engines_show(struct device *dev,
1154 struct device_attribute *attr, char *buf)
1155{
700af3a0 1156 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1157
8241571f 1158 return sysfs_emit(buf, "%u\n", idxd->max_engines);
c52ca478
DJ
1159}
1160static DEVICE_ATTR_RO(max_engines);
1161
1162static ssize_t numa_node_show(struct device *dev,
1163 struct device_attribute *attr, char *buf)
1164{
700af3a0 1165 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1166
8241571f 1167 return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
c52ca478
DJ
1168}
1169static DEVICE_ATTR_RO(numa_node);
1170
1171static ssize_t max_batch_size_show(struct device *dev,
1172 struct device_attribute *attr, char *buf)
1173{
700af3a0 1174 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1175
8241571f 1176 return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
c52ca478
DJ
1177}
1178static DEVICE_ATTR_RO(max_batch_size);
1179
1180static ssize_t max_transfer_size_show(struct device *dev,
1181 struct device_attribute *attr,
1182 char *buf)
1183{
700af3a0 1184 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1185
8241571f 1186 return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
c52ca478
DJ
1187}
1188static DEVICE_ATTR_RO(max_transfer_size);
1189
1190static ssize_t op_cap_show(struct device *dev,
1191 struct device_attribute *attr, char *buf)
1192{
700af3a0 1193 struct idxd_device *idxd = confdev_to_idxd(dev);
ea6a5735
DJ
1194 int i, rc = 0;
1195
1196 for (i = 0; i < 4; i++)
1197 rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
c52ca478 1198
ea6a5735
DJ
1199 rc--;
1200 rc += sysfs_emit_at(buf, rc, "\n");
1201 return rc;
c52ca478
DJ
1202}
1203static DEVICE_ATTR_RO(op_cap);
1204
9065958e
DJ
1205static ssize_t gen_cap_show(struct device *dev,
1206 struct device_attribute *attr, char *buf)
1207{
700af3a0 1208 struct idxd_device *idxd = confdev_to_idxd(dev);
9065958e 1209
8241571f 1210 return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
9065958e
DJ
1211}
1212static DEVICE_ATTR_RO(gen_cap);
1213
c52ca478
DJ
1214static ssize_t configurable_show(struct device *dev,
1215 struct device_attribute *attr, char *buf)
1216{
700af3a0 1217 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1218
8241571f 1219 return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
c52ca478
DJ
1220}
1221static DEVICE_ATTR_RO(configurable);
1222
/*
 * clients: total client count summed over all WQs on this device.
 * dev_lock is held while summing so the per-WQ counts are read as a
 * consistent snapshot (presumably against concurrent open/close paths
 * updating client_count — see the WQ code).
 */
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);
1241
8e50d392
DJ
1242static ssize_t pasid_enabled_show(struct device *dev,
1243 struct device_attribute *attr, char *buf)
1244{
700af3a0 1245 struct idxd_device *idxd = confdev_to_idxd(dev);
8e50d392 1246
8241571f 1247 return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
8e50d392
DJ
1248}
1249static DEVICE_ATTR_RO(pasid_enabled);
1250
c52ca478
DJ
1251static ssize_t state_show(struct device *dev,
1252 struct device_attribute *attr, char *buf)
1253{
700af3a0 1254 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478
DJ
1255
1256 switch (idxd->state) {
1257 case IDXD_DEV_DISABLED:
8241571f 1258 return sysfs_emit(buf, "disabled\n");
c52ca478 1259 case IDXD_DEV_ENABLED:
8241571f 1260 return sysfs_emit(buf, "enabled\n");
c52ca478 1261 case IDXD_DEV_HALTED:
8241571f 1262 return sysfs_emit(buf, "halted\n");
c52ca478
DJ
1263 }
1264
8241571f 1265 return sysfs_emit(buf, "unknown\n");
c52ca478
DJ
1266}
1267static DEVICE_ATTR_RO(state);
1268
1269static ssize_t errors_show(struct device *dev,
1270 struct device_attribute *attr, char *buf)
1271{
700af3a0 1272 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478
DJ
1273 int i, out = 0;
1274 unsigned long flags;
1275
1276 spin_lock_irqsave(&idxd->dev_lock, flags);
1277 for (i = 0; i < 4; i++)
8241571f 1278 out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
c52ca478
DJ
1279 spin_unlock_irqrestore(&idxd->dev_lock, flags);
1280 out--;
8241571f 1281 out += sysfs_emit_at(buf, out, "\n");
c52ca478
DJ
1282 return out;
1283}
1284static DEVICE_ATTR_RO(errors);
1285
1286static ssize_t max_tokens_show(struct device *dev,
1287 struct device_attribute *attr, char *buf)
1288{
700af3a0 1289 struct idxd_device *idxd = confdev_to_idxd(dev);
c52ca478 1290
8241571f 1291 return sysfs_emit(buf, "%u\n", idxd->max_tokens);
c52ca478
DJ
1292}
1293static DEVICE_ATTR_RO(max_tokens);
1294
/* token_limit: the currently programmed global token limit. */
static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->token_limit);
}

/*
 * token_limit_store() - set the global token limit.
 *
 * Rejected unless the device is disabled, software-configurable, and
 * the hardware advertises a programmable token limit; the value may not
 * exceed the total tokens the device owns.
 */
static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	/* The limit cannot change while the device is enabled. */
	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	/* Hardware must support a programmable token limit at all. */
	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	/* Cannot program more tokens than physically present. */
	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);
1331
42d279f9
DJ
1332static ssize_t cdev_major_show(struct device *dev,
1333 struct device_attribute *attr, char *buf)
1334{
700af3a0 1335 struct idxd_device *idxd = confdev_to_idxd(dev);
42d279f9 1336
8241571f 1337 return sysfs_emit(buf, "%u\n", idxd->major);
42d279f9
DJ
1338}
1339static DEVICE_ATTR_RO(cdev_major);
1340
ff18de55
DJ
1341static ssize_t cmd_status_show(struct device *dev,
1342 struct device_attribute *attr, char *buf)
1343{
700af3a0 1344 struct idxd_device *idxd = confdev_to_idxd(dev);
ff18de55 1345
8241571f 1346 return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
ff18de55
DJ
1347}
1348static DEVICE_ATTR_RO(cmd_status);
1349
/* sysfs attributes exposed in the device-level conf_dev directory. */
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
1381
47c16ac2
DJ
/*
 * idxd_conf_device_release() - release callback for the device-level
 * conf_dev.
 *
 * Runs when the last reference to the conf_dev is dropped; frees all
 * per-device allocations and returns the device id to the ida.
 */
static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

/* DSA and IAX share the same release and attributes; only the name differs. */
struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};
1406
75b91130 1407static int idxd_register_engine_devices(struct idxd_device *idxd)
c52ca478 1408{
700af3a0 1409 struct idxd_engine *engine;
75b91130 1410 int i, j, rc;
c52ca478
DJ
1411
1412 for (i = 0; i < idxd->max_engines; i++) {
700af3a0
DJ
1413 engine = idxd->engines[i];
1414 rc = device_add(engine_confdev(engine));
75b91130 1415 if (rc < 0)
c52ca478 1416 goto cleanup;
c52ca478
DJ
1417 }
1418
1419 return 0;
1420
1421cleanup:
75b91130 1422 j = i - 1;
700af3a0
DJ
1423 for (; i < idxd->max_engines; i++) {
1424 engine = idxd->engines[i];
1425 put_device(engine_confdev(engine));
1426 }
c52ca478 1427
700af3a0
DJ
1428 while (j--) {
1429 engine = idxd->engines[j];
1430 device_unregister(engine_confdev(engine));
1431 }
c52ca478
DJ
1432 return rc;
1433}
1434
defe49f9 1435static int idxd_register_group_devices(struct idxd_device *idxd)
c52ca478 1436{
700af3a0 1437 struct idxd_group *group;
defe49f9 1438 int i, j, rc;
c52ca478
DJ
1439
1440 for (i = 0; i < idxd->max_groups; i++) {
700af3a0
DJ
1441 group = idxd->groups[i];
1442 rc = device_add(group_confdev(group));
defe49f9 1443 if (rc < 0)
c52ca478 1444 goto cleanup;
c52ca478
DJ
1445 }
1446
1447 return 0;
1448
1449cleanup:
defe49f9 1450 j = i - 1;
700af3a0
DJ
1451 for (; i < idxd->max_groups; i++) {
1452 group = idxd->groups[i];
1453 put_device(group_confdev(group));
1454 }
c52ca478 1455
700af3a0
DJ
1456 while (j--) {
1457 group = idxd->groups[j];
1458 device_unregister(group_confdev(group));
1459 }
c52ca478
DJ
1460 return rc;
1461}
1462
7c5dd23e 1463static int idxd_register_wq_devices(struct idxd_device *idxd)
c52ca478 1464{
700af3a0 1465 struct idxd_wq *wq;
7c5dd23e 1466 int i, rc, j;
c52ca478
DJ
1467
1468 for (i = 0; i < idxd->max_wqs; i++) {
700af3a0
DJ
1469 wq = idxd->wqs[i];
1470 rc = device_add(wq_confdev(wq));
7c5dd23e 1471 if (rc < 0)
c52ca478 1472 goto cleanup;
c52ca478
DJ
1473 }
1474
1475 return 0;
1476
1477cleanup:
7c5dd23e 1478 j = i - 1;
700af3a0
DJ
1479 for (; i < idxd->max_wqs; i++) {
1480 wq = idxd->wqs[i];
1481 put_device(wq_confdev(wq));
1482 }
c52ca478 1483
700af3a0
DJ
1484 while (j--) {
1485 wq = idxd->wqs[j];
1486 device_unregister(wq_confdev(wq));
1487 }
c52ca478
DJ
1488 return rc;
1489}
1490
/*
 * idxd_register_devices() - publish the whole conf_dev hierarchy in sysfs.
 *
 * Adds the device-level conf_dev first, then all WQ, engine and group
 * conf_devs. On failure each error label unwinds everything registered
 * before it, in reverse order.
 */
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

	/* Each helper unwound its own partial work; undo the earlier stages. */
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}
1530
/*
 * idxd_unregister_devices() - remove all WQ, engine and group conf_devs.
 *
 * Note: the device-level conf_dev itself is not removed here.
 */
void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}
1553
/* Register the shared dsa bus type; called once at module init. */
int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

/* Tear the dsa bus type down at module exit. */
void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}