1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Driver for FPGA Device Feature List (DFL) Support
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Zhang Yi <yi.z.zhang@intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
12 */
13 #include <linux/module.h>
14
15 #include "dfl.h"
16
17 static DEFINE_MUTEX(dfl_id_mutex);
18
19 /*
20  * When adding support for a new feature device to the DFL framework, add a
21  * new item to enum dfl_id_type and provide the related information in the
22  * dfl_devs table below, which is indexed by dfl_id_type, e.g. the name
23  * string used for platform device creation (define name strings in dfl.h,
24  * as they may be reused by platform device drivers).
25  *
26  * If the new feature dev needs chardev support, also add a new item to the
27  * dfl_chrdevs table and set dfl_devs[i].devt_type to its index in
28  * dfl_chrdevs; if no chardev support is needed, set devt_type to the invalid
29  * index (DFL_FPGA_DEVT_MAX). See the illustrative sketch after dfl_chrdevs[].
30 */
31 enum dfl_id_type {
32 FME_ID, /* fme id allocation and mapping */
33 PORT_ID, /* port id allocation and mapping */
34 DFL_ID_MAX,
35 };
36
37 enum dfl_fpga_devt_type {
38 DFL_FPGA_DEVT_FME,
39 DFL_FPGA_DEVT_PORT,
40 DFL_FPGA_DEVT_MAX,
41 };
42
43 static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
44
45 static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
46 "dfl-fme-pdata",
47 "dfl-port-pdata",
48 };
49
50 /**
51  * struct dfl_dev_info - dfl feature device information.
52  * @name: name string of the feature platform device.
53  * @dfh_id: id value in the Device Feature Header (DFH) register per DFL spec.
54  * @id: idr used for feature device id allocation.
55  * @devt_type: index to dfl_chrdevs[].
56 */
57 struct dfl_dev_info {
58 const char *name;
59 u32 dfh_id;
60 struct idr id;
61 enum dfl_fpga_devt_type devt_type;
62 };
63
64 /* it is indexed by dfl_id_type */
65 static struct dfl_dev_info dfl_devs[] = {
66 {.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
67 .devt_type = DFL_FPGA_DEVT_FME},
68 {.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
69 .devt_type = DFL_FPGA_DEVT_PORT},
70 };
71
72 /**
73  * struct dfl_chardev_info - chardev information of dfl feature device
74  * @name: name string of the char device.
75  * @devt: devt of the char device.
76 */
77 struct dfl_chardev_info {
78 const char *name;
79 dev_t devt;
80 };
81
82 /* indexed by enum dfl_fpga_devt_type */
83 static struct dfl_chardev_info dfl_chrdevs[] = {
84 {.name = DFL_FPGA_FEATURE_DEV_FME},
85 {.name = DFL_FPGA_FEATURE_DEV_PORT},
86 };
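/*
 * Illustrative sketch only, not part of this driver: per the note above
 * enum dfl_id_type, supporting a hypothetical new "XYZ" feature device
 * (all XYZ names below are made-up assumptions) means adding a new id
 * before DFL_ID_MAX and a matching dfl_devs[] entry; dfl_chrdevs[] only
 * needs an entry if the new device wants chardev support.
 */
#if 0
	/* in enum dfl_id_type, before DFL_ID_MAX */
	XYZ_ID,		/* xyz id allocation and mapping */

	/* in dfl_devs[], indexed by dfl_id_type */
	{.name = DFL_FPGA_FEATURE_DEV_XYZ, .dfh_id = DFH_ID_FIU_XYZ,
	 .devt_type = DFL_FPGA_DEVT_MAX},	/* no chardev support */
#endif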
87
88 static void dfl_ids_init(void)
89 {
90 int i;
91
92 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
93 idr_init(&dfl_devs[i].id);
94 }
95
96 static void dfl_ids_destroy(void)
97 {
98 int i;
99
100 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
101 idr_destroy(&dfl_devs[i].id);
102 }
103
104 static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
105 {
106 int id;
107
108 WARN_ON(type >= DFL_ID_MAX);
109 mutex_lock(&dfl_id_mutex);
110 id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
111 mutex_unlock(&dfl_id_mutex);
112
113 return id;
114 }
115
116 static void dfl_id_free(enum dfl_id_type type, int id)
117 {
118 WARN_ON(type >= DFL_ID_MAX);
119 mutex_lock(&dfl_id_mutex);
120 idr_remove(&dfl_devs[type].id, id);
121 mutex_unlock(&dfl_id_mutex);
122 }
123
124 static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
125 {
126 int i;
127
128 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
129 if (!strcmp(dfl_devs[i].name, pdev->name))
130 return i;
131
132 return DFL_ID_MAX;
133 }
134
135 static enum dfl_id_type dfh_id_to_type(u32 id)
136 {
137 int i;
138
139 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
140 if (dfl_devs[i].dfh_id == id)
141 return i;
142
143 return DFL_ID_MAX;
144 }
145
146 /*
147  * Introduce a global port_ops list; it allows port drivers to register their
148  * ops in this list, so that other feature devices (e.g. FME) can use the port
149  * functions even when the related port platform device is hidden. One example
150  * is the virtualization case of a PCIe-based FPGA DFL device: when SRIOV is
151  * enabled, a port (and its AFU) is turned into a VF and its port platform
152  * device is hidden from the system, but the FME still needs to access the
153  * port to finish its FPGA reconfiguration function.
154 */
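/*
 * Minimal sketch (an assumption, not taken from an in-tree port driver) of
 * how a port driver could register its ops on this list; only the fields
 * used in this file (name, owner, get_id) are shown, and the xyz_* names
 * are illustrative.
 */
#if 0
static int xyz_port_get_id(struct platform_device *pdev)
{
	/* read and return the port id from hardware here */
	return 0;
}

static struct dfl_fpga_port_ops xyz_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = xyz_port_get_id,
};

static int __init xyz_port_module_init(void)
{
	dfl_fpga_port_ops_add(&xyz_port_ops);
	return 0;
}

static void __exit xyz_port_module_exit(void)
{
	dfl_fpga_port_ops_del(&xyz_port_ops);
}
#endif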
155
156 static DEFINE_MUTEX(dfl_port_ops_mutex);
157 static LIST_HEAD(dfl_port_ops_list);
158
159 /**
160 * dfl_fpga_port_ops_get - get matched port ops from the global list
161 * @pdev: platform device to match with associated port ops.
162 * Return: matched port ops on success, NULL otherwise.
163 *
164  * Please note that the caller must call dfl_fpga_port_ops_put() after
165  * using the port_ops.
165 */
166 struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
167 {
168 struct dfl_fpga_port_ops *ops = NULL;
169
170 mutex_lock(&dfl_port_ops_mutex);
171 if (list_empty(&dfl_port_ops_list))
172 goto done;
173
174 list_for_each_entry(ops, &dfl_port_ops_list, node) {
175 /* match port_ops using the name of platform device */
176 if (!strcmp(pdev->name, ops->name)) {
177 if (!try_module_get(ops->owner))
178 ops = NULL;
179 goto done;
180 }
181 }
182
183 ops = NULL;
184 done:
185 mutex_unlock(&dfl_port_ops_mutex);
186 return ops;
187 }
188 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
189
190 /**
191 * dfl_fpga_port_ops_put - put port ops
192 * @ops: port ops.
193 */
194 void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
195 {
196 if (ops && ops->owner)
197 module_put(ops->owner);
198 }
199 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
200
201 /**
202 * dfl_fpga_port_ops_add - add port_ops to global list
203 * @ops: port ops to add.
204 */
205 void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
206 {
207 mutex_lock(&dfl_port_ops_mutex);
208 list_add_tail(&ops->node, &dfl_port_ops_list);
209 mutex_unlock(&dfl_port_ops_mutex);
210 }
211 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
212
213 /**
214 * dfl_fpga_port_ops_del - remove port_ops from global list
215 * @ops: port ops to del.
216 */
217 void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
218 {
219 mutex_lock(&dfl_port_ops_mutex);
220 list_del(&ops->node);
221 mutex_unlock(&dfl_port_ops_mutex);
222 }
223 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
224
225 /**
226 * dfl_fpga_check_port_id - check the port id
227 * @pdev: port platform device.
228 * @pport_id: port id to compare.
229 *
230 * Return: 1 if port device matches with given port id, otherwise 0.
231 */
232 int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
233 {
234 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
235 struct dfl_fpga_port_ops *port_ops;
236
237 if (pdata->id != FEATURE_DEV_ID_UNUSED)
238 return pdata->id == *(int *)pport_id;
239
240 port_ops = dfl_fpga_port_ops_get(pdev);
241 if (!port_ops || !port_ops->get_id)
242 return 0;
243
244 pdata->id = port_ops->get_id(pdev);
245 dfl_fpga_port_ops_put(port_ops);
246
247 return pdata->id == *(int *)pport_id;
248 }
249 EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
250
251 /**
252 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
253 * @pdev: feature device.
254 */
255 void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
256 {
257 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
258 struct dfl_feature *feature;
259
260 dfl_fpga_dev_for_each_feature(pdata, feature)
261 if (feature->ops) {
262 if (feature->ops->uinit)
263 feature->ops->uinit(pdev, feature);
264 feature->ops = NULL;
265 }
266 }
267 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
268
269 static int dfl_feature_instance_init(struct platform_device *pdev,
270 struct dfl_feature_platform_data *pdata,
271 struct dfl_feature *feature,
272 struct dfl_feature_driver *drv)
273 {
274 int ret = 0;
275
276 if (drv->ops->init) {
277 ret = drv->ops->init(pdev, feature);
278 if (ret)
279 return ret;
280 }
281
282 feature->ops = drv->ops;
283
284 return ret;
285 }
286
287 static bool dfl_feature_drv_match(struct dfl_feature *feature,
288 struct dfl_feature_driver *driver)
289 {
290 const struct dfl_feature_id *ids = driver->id_table;
291
292 if (ids) {
293 while (ids->id) {
294 if (ids->id == feature->id)
295 return true;
296 ids++;
297 }
298 }
299 return false;
300 }
301
302 /**
303 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
304 * @pdev: feature device.
305 * @feature_drvs: drvs for sub features.
306 *
307  * This function will match sub features against the given feature drvs list
308  * and use the matched drv to init the related sub feature.
309 *
310 * Return: 0 on success, negative error code otherwise.
311 */
312 int dfl_fpga_dev_feature_init(struct platform_device *pdev,
313 struct dfl_feature_driver *feature_drvs)
314 {
315 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
316 struct dfl_feature_driver *drv = feature_drvs;
317 struct dfl_feature *feature;
318 int ret;
319
320 while (drv->ops) {
321 dfl_fpga_dev_for_each_feature(pdata, feature) {
322 if (dfl_feature_drv_match(feature, drv)) {
323 ret = dfl_feature_instance_init(pdev, pdata,
324 feature, drv);
325 if (ret)
326 goto exit;
327 }
328 }
329 drv++;
330 }
331
332 return 0;
333 exit:
334 dfl_fpga_dev_feature_uinit(pdev);
335 return ret;
336 }
337 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
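/*
 * Minimal sketch (assumption) of a sub feature driver table as consumed by
 * dfl_fpga_dev_feature_init(); the xyz_* names and the 0x1 feature id are
 * illustrative only. A zero id ends the id_table and a NULL ops entry ends
 * the driver list, matching the loops above.
 */
#if 0
static int xyz_sub_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	/* set up the sub feature, e.g. map registers, add sysfs entries */
	return 0;
}

static void xyz_sub_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	/* undo whatever xyz_sub_init() did */
}

static const struct dfl_feature_id xyz_sub_id_table[] = {
	{.id = 0x1,},
	{0,},
};

static const struct dfl_feature_ops xyz_sub_ops = {
	.init = xyz_sub_init,
	.uinit = xyz_sub_uinit,
};

static struct dfl_feature_driver xyz_feature_drvs[] = {
	{.id_table = xyz_sub_id_table, .ops = &xyz_sub_ops,},
	{.ops = NULL,},
};

/* in the feature platform driver's probe() */
ret = dfl_fpga_dev_feature_init(pdev, xyz_feature_drvs);
#endif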
338
339 static void dfl_chardev_uinit(void)
340 {
341 int i;
342
343 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
344 if (MAJOR(dfl_chrdevs[i].devt)) {
345 unregister_chrdev_region(dfl_chrdevs[i].devt,
346 MINORMASK + 1);
347 dfl_chrdevs[i].devt = MKDEV(0, 0);
348 }
349 }
350
351 static int dfl_chardev_init(void)
352 {
353 int i, ret;
354
355 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
356 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
357 MINORMASK + 1, dfl_chrdevs[i].name);
358 if (ret)
359 goto exit;
360 }
361
362 return 0;
363
364 exit:
365 dfl_chardev_uinit();
366 return ret;
367 }
368
369 static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
370 {
371 if (type >= DFL_FPGA_DEVT_MAX)
372 return 0;
373
374 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
375 }
376
377 /**
378 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
379 *
380 * @pdev: feature dev.
381 * @fops: file operations for feature dev's cdev.
382 * @owner: owning module/driver.
383 *
384 * Return: 0 on success, negative error code otherwise.
385 */
386 int dfl_fpga_dev_ops_register(struct platform_device *pdev,
387 const struct file_operations *fops,
388 struct module *owner)
389 {
390 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
391
392 cdev_init(&pdata->cdev, fops);
393 pdata->cdev.owner = owner;
394
395 /*
396  * set parent to the feature device so that its refcount is
397  * decreased only after the last reference to the cdev is
398  * gone; this makes sure the feature device stays valid
399  * during the device file's life cycle.
400 */
401 pdata->cdev.kobj.parent = &pdev->dev.kobj;
402
403 return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
404 }
405 EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
406
407 /**
408 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
409 * @pdev: feature dev.
410 */
411 void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
412 {
413 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
414
415 cdev_del(&pdata->cdev);
416 }
417 EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
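/*
 * Minimal usage sketch (assumption): a feature dev driver pairs the two
 * calls above in its probe()/remove() paths; xyz_fops is a made-up
 * file_operations instance.
 */
#if 0
	/* in probe() */
	ret = dfl_fpga_dev_ops_register(pdev, &xyz_fops, THIS_MODULE);
	if (ret)
		return ret;

	/* in remove() */
	dfl_fpga_dev_ops_unregister(pdev);
#endif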
418
419 /**
420 * struct build_feature_devs_info - info collected during feature dev build.
421 *
422 * @dev: device to enumerate.
423 * @cdev: the container device for all feature devices.
424 * @feature_dev: current feature device.
425 * @ioaddr: header register region address of feature device in enumeration.
426 * @sub_features: a sub features linked list for feature device in enumeration.
427 * @feature_num: number of sub features for feature device in enumeration.
428 */
429 struct build_feature_devs_info {
430 struct device *dev;
431 struct dfl_fpga_cdev *cdev;
432 struct platform_device *feature_dev;
433 void __iomem *ioaddr;
434 struct list_head sub_features;
435 int feature_num;
436 };
437
438 /**
439 * struct dfl_feature_info - sub feature info collected during feature dev build
440 *
441 * @fid: id of this sub feature.
442 * @mmio_res: mmio resource of this sub feature.
443 * @ioaddr: mapped base address of mmio resource.
444 * @node: node in sub_features linked list.
445 */
446 struct dfl_feature_info {
447 u64 fid;
448 struct resource mmio_res;
449 void __iomem *ioaddr;
450 struct list_head node;
451 };
452
453 static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
454 struct platform_device *port)
455 {
456 struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
457
458 mutex_lock(&cdev->lock);
459 list_add(&pdata->node, &cdev->port_dev_list);
460 get_device(&pdata->dev->dev);
461 mutex_unlock(&cdev->lock);
462 }
463
464 /*
465  * Register the current feature device. It is called when we need to switch
466  * to parsing another feature or when we have parsed all features on the
467  * given device feature list.
468 */
469 static int build_info_commit_dev(struct build_feature_devs_info *binfo)
470 {
471 struct platform_device *fdev = binfo->feature_dev;
472 struct dfl_feature_platform_data *pdata;
473 struct dfl_feature_info *finfo, *p;
474 enum dfl_id_type type;
475 int ret, index = 0;
476
477 if (!fdev)
478 return 0;
479
480 type = feature_dev_id_type(fdev);
481 if (WARN_ON_ONCE(type >= DFL_ID_MAX))
482 return -EINVAL;
483
484 /*
485  * we do not need to care about freeing the memory associated with
486  * the platform device. After calling platform_device_unregister(),
487  * it will be automatically freed by the device's release() callback,
488  * platform_device_release().
489 */
490 pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
491 GFP_KERNEL);
492 if (!pdata)
493 return -ENOMEM;
494
495 pdata->dev = fdev;
496 pdata->num = binfo->feature_num;
497 pdata->dfl_cdev = binfo->cdev;
498 pdata->id = FEATURE_DEV_ID_UNUSED;
499 mutex_init(&pdata->lock);
500 lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
501 dfl_pdata_key_strings[type]);
502
503 /*
504  * the disable count must start from 0 to make sure
505  * __fpga_port_enable() following __fpga_port_disable()
506  * works properly for a port device, and it should
507  * always be 0 for an FME device.
508 */
509 WARN_ON(pdata->disable_count);
510
511 fdev->dev.platform_data = pdata;
512
513 /* each sub feature has one MMIO resource */
514 fdev->num_resources = binfo->feature_num;
515 fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
516 GFP_KERNEL);
517 if (!fdev->resource)
518 return -ENOMEM;
519
520 /* fill features and resource information for feature dev */
521 list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
522 struct dfl_feature *feature = &pdata->features[index];
523
524 /* save resource information for each feature */
525 feature->id = finfo->fid;
526 feature->resource_index = index;
527 feature->ioaddr = finfo->ioaddr;
528 fdev->resource[index++] = finfo->mmio_res;
529
530 list_del(&finfo->node);
531 kfree(finfo);
532 }
533
534 ret = platform_device_add(binfo->feature_dev);
535 if (!ret) {
536 if (type == PORT_ID)
537 dfl_fpga_cdev_add_port_dev(binfo->cdev,
538 binfo->feature_dev);
539 else
540 binfo->cdev->fme_dev =
541 get_device(&binfo->feature_dev->dev);
542 /*
543  * reset it to avoid build_info_free() freeing its resources.
544 *
545 * The resource of successfully registered feature devices
546 * will be freed by platform_device_unregister(). See the
547 * comments in build_info_create_dev().
548 */
549 binfo->feature_dev = NULL;
550 }
551
552 return ret;
553 }
554
555 static int
556 build_info_create_dev(struct build_feature_devs_info *binfo,
557 enum dfl_id_type type, void __iomem *ioaddr)
558 {
559 struct platform_device *fdev;
560 int ret;
561
562 if (type >= DFL_ID_MAX)
563 return -EINVAL;
564
565 /* we will create a new device, commit current device first */
566 ret = build_info_commit_dev(binfo);
567 if (ret)
568 return ret;
569
570 /*
571  * we use -ENODEV as the initial device id, which also indicates
572  * whether the id needs to be reclaimed in build_info_free()
573 */
574 fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
575 if (!fdev)
576 return -ENOMEM;
577
578 binfo->feature_dev = fdev;
579 binfo->feature_num = 0;
580 binfo->ioaddr = ioaddr;
581 INIT_LIST_HEAD(&binfo->sub_features);
582
583 fdev->id = dfl_id_alloc(type, &fdev->dev);
584 if (fdev->id < 0)
585 return fdev->id;
586
587 fdev->dev.parent = &binfo->cdev->region->dev;
588 fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
589
590 return 0;
591 }
592
593 static void build_info_free(struct build_feature_devs_info *binfo)
594 {
595 struct dfl_feature_info *finfo, *p;
596
597 /*
598  * if the id is valid, free it; see the comments in
599  * build_info_create_dev()
600 */
601 if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
602 dfl_id_free(feature_dev_id_type(binfo->feature_dev),
603 binfo->feature_dev->id);
604
605 list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
606 list_del(&finfo->node);
607 kfree(finfo);
608 }
609 }
610
611 platform_device_put(binfo->feature_dev);
612
613 devm_kfree(binfo->dev, binfo);
614 }
615
616 static inline u32 feature_size(void __iomem *start)
617 {
618 u64 v = readq(start + DFH);
619 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
620 /* workaround for private features with invalid size, use 4K instead */
621 return ofst ? ofst : 4096;
622 }
623
624 static u64 feature_id(void __iomem *start)
625 {
626 u64 v = readq(start + DFH);
627 u16 id = FIELD_GET(DFH_ID, v);
628 u8 type = FIELD_GET(DFH_TYPE, v);
629
630 if (type == DFH_TYPE_FIU)
631 return FEATURE_ID_FIU_HEADER;
632 else if (type == DFH_TYPE_PRIVATE)
633 return id;
634 else if (type == DFH_TYPE_AFU)
635 return FEATURE_ID_AFU;
636
637 WARN_ON(1);
638 return 0;
639 }
640
641 /*
642  * When creating sub feature instances, private features do not need the
643  * resource size and feature id to be provided, as they can be read from the
644  * DFH register. For an AFU sub feature, its register region only contains
645  * user-defined registers, so never trust any information from it; just use
646  * the resource size information provided by its parent FIU.
647 */
648 static int
649 create_feature_instance(struct build_feature_devs_info *binfo,
650 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
651 resource_size_t size, u64 fid)
652 {
653 struct dfl_feature_info *finfo;
654
655 /* read feature size and id if inputs are invalid */
656 size = size ? size : feature_size(dfl->ioaddr + ofst);
657 fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
658
659 if (dfl->len - ofst < size)
660 return -EINVAL;
661
662 finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
663 if (!finfo)
664 return -ENOMEM;
665
666 finfo->fid = fid;
667 finfo->mmio_res.start = dfl->start + ofst;
668 finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
669 finfo->mmio_res.flags = IORESOURCE_MEM;
670 finfo->ioaddr = dfl->ioaddr + ofst;
671
672 list_add_tail(&finfo->node, &binfo->sub_features);
673 binfo->feature_num++;
674
675 return 0;
676 }
677
678 static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
679 struct dfl_fpga_enum_dfl *dfl,
680 resource_size_t ofst)
681 {
682 u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
683 u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
684
685 WARN_ON(!size);
686
687 return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
688 }
689
690 static int parse_feature_afu(struct build_feature_devs_info *binfo,
691 struct dfl_fpga_enum_dfl *dfl,
692 resource_size_t ofst)
693 {
694 if (!binfo->feature_dev) {
695 dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
696 return -EINVAL;
697 }
698
699 switch (feature_dev_id_type(binfo->feature_dev)) {
700 case PORT_ID:
701 return parse_feature_port_afu(binfo, dfl, ofst);
702 default:
703 dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
704 binfo->feature_dev->name);
705 }
706
707 return 0;
708 }
709
710 static int parse_feature_fiu(struct build_feature_devs_info *binfo,
711 struct dfl_fpga_enum_dfl *dfl,
712 resource_size_t ofst)
713 {
714 u32 id, offset;
715 u64 v;
716 int ret = 0;
717
718 v = readq(dfl->ioaddr + ofst + DFH);
719 id = FIELD_GET(DFH_ID, v);
720
721 /* create platform device for dfl feature dev */
722 ret = build_info_create_dev(binfo, dfh_id_to_type(id),
723 dfl->ioaddr + ofst);
724 if (ret)
725 return ret;
726
727 ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
728 if (ret)
729 return ret;
730 /*
731 * find and parse FIU's child AFU via its NEXT_AFU register.
732 * please note that only Port has valid NEXT_AFU pointer per spec.
733 */
734 v = readq(dfl->ioaddr + ofst + NEXT_AFU);
735
736 offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
737 if (offset)
738 return parse_feature_afu(binfo, dfl, ofst + offset);
739
740 dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
741
742 return ret;
743 }
744
745 static int parse_feature_private(struct build_feature_devs_info *binfo,
746 struct dfl_fpga_enum_dfl *dfl,
747 resource_size_t ofst)
748 {
749 if (!binfo->feature_dev) {
750 dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
751 (unsigned long long)feature_id(dfl->ioaddr + ofst));
752 return -EINVAL;
753 }
754
755 return create_feature_instance(binfo, dfl, ofst, 0, 0);
756 }
757
758 /**
759 * parse_feature - parse a feature on given device feature list
760 *
761 * @binfo: build feature devices information.
762 * @dfl: device feature list to parse
763 * @ofst: offset to feature header on this device feature list
764 */
765 static int parse_feature(struct build_feature_devs_info *binfo,
766 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
767 {
768 u64 v;
769 u32 type;
770
771 v = readq(dfl->ioaddr + ofst + DFH);
772 type = FIELD_GET(DFH_TYPE, v);
773
774 switch (type) {
775 case DFH_TYPE_AFU:
776 return parse_feature_afu(binfo, dfl, ofst);
777 case DFH_TYPE_PRIVATE:
778 return parse_feature_private(binfo, dfl, ofst);
779 case DFH_TYPE_FIU:
780 return parse_feature_fiu(binfo, dfl, ofst);
781 default:
782 dev_info(binfo->dev,
783 "Feature Type %x is not supported.\n", type);
784 }
785
786 return 0;
787 }
788
789 static int parse_feature_list(struct build_feature_devs_info *binfo,
790 struct dfl_fpga_enum_dfl *dfl)
791 {
792 void __iomem *start = dfl->ioaddr;
793 void __iomem *end = dfl->ioaddr + dfl->len;
794 int ret = 0;
795 u32 ofst = 0;
796 u64 v;
797
798 /* walk through the device feature list via DFH's next DFH pointer. */
799 for (; start < end; start += ofst) {
800 if (end - start < DFH_SIZE) {
801 dev_err(binfo->dev, "The region is too small to contain a feature.\n");
802 return -EINVAL;
803 }
804
805 ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
806 if (ret)
807 return ret;
808
809 v = readq(start + DFH);
810 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
811
812 		/* stop parsing if EOL (End of List) is set or offset is 0 */
813 if ((v & DFH_EOL) || !ofst)
814 break;
815 }
816
817 	/* commit the current feature device when we reach the end of the list */
818 return build_info_commit_dev(binfo);
819 }
820
821 struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
822 {
823 struct dfl_fpga_enum_info *info;
824
825 get_device(dev);
826
827 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
828 if (!info) {
829 put_device(dev);
830 return NULL;
831 }
832
833 info->dev = dev;
834 INIT_LIST_HEAD(&info->dfls);
835
836 return info;
837 }
838 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
839
840 void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
841 {
842 struct dfl_fpga_enum_dfl *tmp, *dfl;
843 struct device *dev;
844
845 if (!info)
846 return;
847
848 dev = info->dev;
849
850 	/* remove all device feature lists from the enumeration info. */
851 list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
852 list_del(&dfl->node);
853 devm_kfree(dev, dfl);
854 }
855
856 devm_kfree(dev, info);
857 put_device(dev);
858 }
859 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
860
861 /**
862 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
863 *
864 * @info: ptr to dfl_fpga_enum_info
865 * @start: mmio resource address of the device feature list.
866 * @len: mmio resource length of the device feature list.
867 * @ioaddr: mapped mmio resource address of the device feature list.
868 *
869  * One FPGA device may have one or more Device Feature Lists (DFLs); use this
870  * function to add the information of each DFL to a common data structure for
871  * the next enumeration step.
872 *
873 * Return: 0 on success, negative error code otherwise.
874 */
875 int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
876 resource_size_t start, resource_size_t len,
877 void __iomem *ioaddr)
878 {
879 struct dfl_fpga_enum_dfl *dfl;
880
881 dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
882 if (!dfl)
883 return -ENOMEM;
884
885 dfl->start = start;
886 dfl->len = len;
887 dfl->ioaddr = ioaddr;
888
889 list_add_tail(&dfl->node, &info->dfls);
890
891 return 0;
892 }
893 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
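/*
 * Minimal sketch (assumption) of the enumeration flow from a bus-specific
 * driver's point of view; start/len/ioaddr describe one DFL discovered on
 * the device, and keeping cdev around for dfl_fpga_feature_devs_remove()
 * later is up to the caller.
 */
#if 0
static int xyz_enumerate(struct device *dev, resource_size_t start,
			 resource_size_t len, void __iomem *ioaddr)
{
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int ret;

	info = dfl_fpga_enum_info_alloc(dev);
	if (!info)
		return -ENOMEM;

	ret = dfl_fpga_enum_info_add_dfl(info, start, len, ioaddr);
	if (ret)
		goto free_info;

	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev))
		ret = PTR_ERR(cdev);

free_info:
	dfl_fpga_enum_info_free(info);
	return ret;
}
#endif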
894
895 static int remove_feature_dev(struct device *dev, void *data)
896 {
897 struct platform_device *pdev = to_platform_device(dev);
898 enum dfl_id_type type = feature_dev_id_type(pdev);
899 int id = pdev->id;
900
901 platform_device_unregister(pdev);
902
903 dfl_id_free(type, id);
904
905 return 0;
906 }
907
908 static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
909 {
910 device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
911 }
912
913 /**
914 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
915 * @info: information for enumeration.
916 *
917 * This function creates a container device (base FPGA region), enumerates
918 * feature devices based on the enumeration info and creates platform devices
919 * under the container device.
920 *
921 * Return: dfl_fpga_cdev struct on success, -errno on failure
922 */
923 struct dfl_fpga_cdev *
924 dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
925 {
926 struct build_feature_devs_info *binfo;
927 struct dfl_fpga_enum_dfl *dfl;
928 struct dfl_fpga_cdev *cdev;
929 int ret = 0;
930
931 if (!info->dev)
932 return ERR_PTR(-ENODEV);
933
934 cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
935 if (!cdev)
936 return ERR_PTR(-ENOMEM);
937
938 cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
939 if (!cdev->region) {
940 ret = -ENOMEM;
941 goto free_cdev_exit;
942 }
943
944 cdev->parent = info->dev;
945 mutex_init(&cdev->lock);
946 INIT_LIST_HEAD(&cdev->port_dev_list);
947
948 ret = fpga_region_register(cdev->region);
949 if (ret)
950 goto free_cdev_exit;
951
952 /* create and init build info for enumeration */
953 binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
954 if (!binfo) {
955 ret = -ENOMEM;
956 goto unregister_region_exit;
957 }
958
959 binfo->dev = info->dev;
960 binfo->cdev = cdev;
961
962 /*
963 * start enumeration for all feature devices based on Device Feature
964 * Lists.
965 */
966 list_for_each_entry(dfl, &info->dfls, node) {
967 ret = parse_feature_list(binfo, dfl);
968 if (ret) {
969 remove_feature_devs(cdev);
970 build_info_free(binfo);
971 goto unregister_region_exit;
972 }
973 }
974
975 build_info_free(binfo);
976
977 return cdev;
978
979 unregister_region_exit:
980 fpga_region_unregister(cdev->region);
981 free_cdev_exit:
982 devm_kfree(info->dev, cdev);
983 return ERR_PTR(ret);
984 }
985 EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
986
987 /**
988 * dfl_fpga_feature_devs_remove - remove all feature devices
989 * @cdev: fpga container device.
990 *
991  * Remove the container device and all feature devices under the given
992  * container device.
993 */
994 void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
995 {
996 struct dfl_feature_platform_data *pdata, *ptmp;
997
998 mutex_lock(&cdev->lock);
999 if (cdev->fme_dev)
1000 put_device(cdev->fme_dev);
1001
1002 list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
1003 struct platform_device *port_dev = pdata->dev;
1004
1005 /* remove released ports */
1006 if (!device_is_registered(&port_dev->dev)) {
1007 dfl_id_free(feature_dev_id_type(port_dev),
1008 port_dev->id);
1009 platform_device_put(port_dev);
1010 }
1011
1012 list_del(&pdata->node);
1013 put_device(&port_dev->dev);
1014 }
1015 mutex_unlock(&cdev->lock);
1016
1017 remove_feature_devs(cdev);
1018
1019 fpga_region_unregister(cdev->region);
1020 devm_kfree(cdev->parent, cdev);
1021 }
1022 EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
1023
1024 /**
1025 * __dfl_fpga_cdev_find_port - find a port under given container device
1026 *
1027 * @cdev: container device
1028 * @data: data passed to match function
1029 * @match: match function used to find specific port from the port device list
1030 *
1031  * Find a port device under the container device. This function needs to be
1032  * invoked with cdev->lock held.
1033 *
1034 * Return: pointer to port's platform device if successful, NULL otherwise.
1035 *
1036 * NOTE: you will need to drop the device reference with put_device() after use.
1037 */
1038 struct platform_device *
1039 __dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
1040 int (*match)(struct platform_device *, void *))
1041 {
1042 struct dfl_feature_platform_data *pdata;
1043 struct platform_device *port_dev;
1044
1045 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1046 port_dev = pdata->dev;
1047
1048 if (match(port_dev, data) && get_device(&port_dev->dev))
1049 return port_dev;
1050 }
1051
1052 return NULL;
1053 }
1054 EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
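/*
 * Minimal usage sketch (assumption), mirroring dfl_fpga_cdev_release_port()
 * below: look up a port by id with cdev->lock held and drop the device
 * reference once done with it.
 */
#if 0
	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (port_pdev) {
		/* ... use port_pdev here ... */
		put_device(&port_pdev->dev);
	}
	mutex_unlock(&cdev->lock);
#endif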
1055
1056 static int __init dfl_fpga_init(void)
1057 {
1058 int ret;
1059
1060 dfl_ids_init();
1061
1062 ret = dfl_chardev_init();
1063 if (ret)
1064 dfl_ids_destroy();
1065
1066 return ret;
1067 }
1068
1069 /**
1070 * dfl_fpga_cdev_release_port - release a port platform device
1071 *
1072 * @cdev: parent container device.
1073 * @port_id: id of the port platform device.
1074 *
1075  * This function allows the user to release a port platform device. This is
1076  * a mandatory step before turning a port from PF into VF for SRIOV support.
1077 *
1078 * Return: 0 on success, negative error code otherwise.
1079 */
1080 int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
1081 {
1082 struct dfl_feature_platform_data *pdata;
1083 struct platform_device *port_pdev;
1084 int ret = -ENODEV;
1085
1086 mutex_lock(&cdev->lock);
1087 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1088 dfl_fpga_check_port_id);
1089 if (!port_pdev)
1090 goto unlock_exit;
1091
1092 if (!device_is_registered(&port_pdev->dev)) {
1093 ret = -EBUSY;
1094 goto put_dev_exit;
1095 }
1096
1097 pdata = dev_get_platdata(&port_pdev->dev);
1098
1099 mutex_lock(&pdata->lock);
1100 ret = dfl_feature_dev_use_begin(pdata, true);
1101 mutex_unlock(&pdata->lock);
1102 if (ret)
1103 goto put_dev_exit;
1104
1105 platform_device_del(port_pdev);
1106 cdev->released_port_num++;
1107 put_dev_exit:
1108 put_device(&port_pdev->dev);
1109 unlock_exit:
1110 mutex_unlock(&cdev->lock);
1111 return ret;
1112 }
1113 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1114
1115 /**
1116 * dfl_fpga_cdev_assign_port - assign a port platform device back
1117 *
1118 * @cdev: parent container device.
1119 * @port_id: id of the port platform device.
1120 *
1121  * This function allows the user to assign a port platform device back. This
1122  * is a mandatory step after disabling SRIOV support.
1123 *
1124 * Return: 0 on success, negative error code otherwise.
1125 */
1126 int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
1127 {
1128 struct dfl_feature_platform_data *pdata;
1129 struct platform_device *port_pdev;
1130 int ret = -ENODEV;
1131
1132 mutex_lock(&cdev->lock);
1133 port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1134 dfl_fpga_check_port_id);
1135 if (!port_pdev)
1136 goto unlock_exit;
1137
1138 if (device_is_registered(&port_pdev->dev)) {
1139 ret = -EBUSY;
1140 goto put_dev_exit;
1141 }
1142
1143 ret = platform_device_add(port_pdev);
1144 if (ret)
1145 goto put_dev_exit;
1146
1147 pdata = dev_get_platdata(&port_pdev->dev);
1148
1149 mutex_lock(&pdata->lock);
1150 dfl_feature_dev_use_end(pdata);
1151 mutex_unlock(&pdata->lock);
1152
1153 cdev->released_port_num--;
1154 put_dev_exit:
1155 put_device(&port_pdev->dev);
1156 unlock_exit:
1157 mutex_unlock(&cdev->lock);
1158 return ret;
1159 }
1160 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1161
1162 static void config_port_access_mode(struct device *fme_dev, int port_id,
1163 bool is_vf)
1164 {
1165 void __iomem *base;
1166 u64 v;
1167
1168 base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
1169
1170 v = readq(base + FME_HDR_PORT_OFST(port_id));
1171
1172 v &= ~FME_PORT_OFST_ACC_CTRL;
1173 v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
1174 is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
1175
1176 writeq(v, base + FME_HDR_PORT_OFST(port_id));
1177 }
1178
1179 #define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
1180 #define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1181
1182 /**
1183 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
1184 *
1185 * @cdev: parent container device.
1186 *
1187  * This function is needed in the SRIOV configuration routine. It can be used
1188  * to configure all released ports from VF access mode back to PF.
1189 */
1190 void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1191 {
1192 struct dfl_feature_platform_data *pdata;
1193
1194 mutex_lock(&cdev->lock);
1195 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1196 if (device_is_registered(&pdata->dev->dev))
1197 continue;
1198
1199 config_port_pf_mode(cdev->fme_dev, pdata->id);
1200 }
1201 mutex_unlock(&cdev->lock);
1202 }
1203 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1204
1205 /**
1206 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
1207 *
1208 * @cdev: parent container device.
1209 * @num_vfs: VF device number.
1210 *
1211  * This function is needed in the SRIOV configuration routine. It can be used
1212  * to configure the released ports from PF access mode to VF.
1213 *
1214 * Return: 0 on success, negative error code otherwise.
1215 */
1216 int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
1217 {
1218 struct dfl_feature_platform_data *pdata;
1219 int ret = 0;
1220
1221 mutex_lock(&cdev->lock);
1222 /*
1223 	 * we can't turn multiple ports into one VF device; each VF device
1224 	 * gets exactly one port, so if the released port number doesn't match
1225 	 * the VF device number, reject the request with -EINVAL.
1226 */
1227 if (cdev->released_port_num != num_vfs) {
1228 ret = -EINVAL;
1229 goto done;
1230 }
1231
1232 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1233 if (device_is_registered(&pdata->dev->dev))
1234 continue;
1235
1236 config_port_vf_mode(cdev->fme_dev, pdata->id);
1237 }
1238 done:
1239 mutex_unlock(&cdev->lock);
1240 return ret;
1241 }
1242 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
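/*
 * Minimal sketch (assumption) of how a bus-specific driver's SRIOV
 * configuration path could tie the pieces together. Ports are expected to
 * have been released beforehand via dfl_fpga_cdev_release_port(); enabling
 * or disabling the VFs themselves is left to the bus-specific API.
 */
#if 0
	if (num_vfs == 0) {
		/* after the bus-specific code has disabled all VFs */
		dfl_fpga_cdev_config_ports_pf(cdev);
	} else {
		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
		if (ret)
			return ret;
		/* then let the bus-specific code enable num_vfs VFs */
	}
#endif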
1243
1244 static void __exit dfl_fpga_exit(void)
1245 {
1246 dfl_chardev_uinit();
1247 dfl_ids_destroy();
1248 }
1249
1250 module_init(dfl_fpga_init);
1251 module_exit(dfl_fpga_exit);
1252
1253 MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
1254 MODULE_AUTHOR("Intel Corporation");
1255 MODULE_LICENSE("GPL v2");