// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
                             const struct tb_service *svc)
{
        if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
                if (strcmp(id->protocol_key, svc->key))
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
                if (id->protocol_id != svc->prtcid)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
                if (id->protocol_version != svc->prtcvers)
                        return false;
        }

        if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
                if (id->protocol_revision != svc->prtcrevs)
                        return false;
        }

        return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
                                                      struct device_driver *drv)
{
        struct tb_service_driver *driver;
        const struct tb_service_id *ids;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return NULL;

        driver = container_of(drv, struct tb_service_driver, driver);
        if (!driver->id_table)
                return NULL;

        for (ids = driver->id_table; ids->match_flags != 0; ids++) {
                if (match_service_id(ids, svc))
                        return ids;
        }

        return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
        return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;
        const struct tb_service_id *id;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        id = __tb_service_match(dev, &driver->driver);

        return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->remove)
                driver->remove(svc);

        return 0;
}

static void tb_service_shutdown(struct device *dev)
{
        struct tb_service_driver *driver;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc || !dev->driver)
                return;

        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->shutdown)
                driver->shutdown(svc);
}
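
/*
 * The matching and probe/remove/shutdown hooks above are what a service
 * driver plugs into. A minimal sketch (not part of this file; the
 * "example" names and the protocol key/ID are hypothetical, while
 * TB_SERVICE(), struct tb_service_driver, tb_register_service_driver()
 * and tb_unregister_service_driver() are the real interfaces from
 * <linux/thunderbolt.h>):
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 *	static int example_probe(struct tb_service *svc,
 *				 const struct tb_service_id *id)
 *	{
 *		// Set up the service; @id is the entry that matched
 *		return 0;
 *	}
 *
 *	static void example_remove(struct tb_service *svc)
 *	{
 *		// Tear down the service
 *	}
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *
 * The driver is then registered with
 * tb_register_service_driver(&example_driver) and unregistered with
 * tb_unregister_service_driver(&example_driver).
 */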

static const char * const tb_security_names[] = {
        [TB_SECURITY_NONE] = "none",
        [TB_SECURITY_USER] = "user",
        [TB_SECURITY_SECURE] = "secure",
        [TB_SECURITY_DPONLY] = "dponly",
        [TB_SECURITY_USBONLY] = "usbonly",
        [TB_SECURITY_NOPCIE] = "nopcie",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        uuid_t *uuids;
        ssize_t ret;
        int i;

        uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!uuids)
                return -ENOMEM;

        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto out;
        }
        ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
        if (ret) {
                mutex_unlock(&tb->lock);
                goto out;
        }
        mutex_unlock(&tb->lock);

        for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
                if (!uuid_is_null(&uuids[i]))
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
                                         &uuids[i]);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
                                 i < tb->nboot_acl - 1 ? "," : "\n");
        }

out:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
        kfree(uuids);

        return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        char *str, *s, *uuid_str;
        ssize_t ret = 0;
        uuid_t *acl;
        int i = 0;

        /*
         * Make sure the value is not bigger than tb->nboot_acl * UUID
         * length + commas and optional "\n". Also the smallest allowable
         * string is tb->nboot_acl * ",".
         */
        if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
                return -EINVAL;
        if (count < tb->nboot_acl - 1)
                return -EINVAL;

        str = kstrdup(buf, GFP_KERNEL);
        if (!str)
                return -ENOMEM;

        acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
        if (!acl) {
                ret = -ENOMEM;
                goto err_free_str;
        }

        uuid_str = strim(str);
        while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
                size_t len = strlen(s);

                if (len) {
                        if (len != UUID_STRING_LEN) {
                                ret = -EINVAL;
                                goto err_free_acl;
                        }
                        ret = uuid_parse(s, &acl[i]);
                        if (ret)
                                goto err_free_acl;
                }

                i++;
        }

        if (s || i < tb->nboot_acl) {
                ret = -EINVAL;
                goto err_free_acl;
        }

        pm_runtime_get_sync(&tb->dev);

        if (mutex_lock_interruptible(&tb->lock)) {
                ret = -ERESTARTSYS;
                goto err_rpm_put;
        }
        ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
        if (!ret) {
                /* Notify userspace about the change */
                kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
        }
        mutex_unlock(&tb->lock);

err_rpm_put:
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
        kfree(acl);
err_free_str:
        kfree(str);

        return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
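
/*
 * The show/store pair above implements a simple textual ABI: the file
 * holds tb->nboot_acl comma-separated slots, where an empty slot is an
 * empty string and a used slot is one UUID. A sketch of a session (the
 * UUID is made up, and the number of slots depends on the controller):
 *
 *	# cat /sys/bus/thunderbolt/devices/domain0/boot_acl
 *	,,,
 *	# echo "0b003594-9710-4df3-b5f3-49f902c1d3ea,,," > \
 *		/sys/bus/thunderbolt/devices/domain0/boot_acl
 *
 * A write with too few slots or a malformed UUID fails with -EINVAL,
 * as enforced by the length and uuid_parse() checks above.
 */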

static ssize_t deauthorization_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        const struct tb *tb = container_of(dev, struct tb, dev);
        bool deauthorization = false;

        /* Only meaningful if authorization is supported */
        if (tb->security_level == TB_SECURITY_USER ||
            tb->security_level == TB_SECURITY_SECURE)
                deauthorization = !!tb->cm_ops->disapprove_switch;

        return sprintf(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);

static ssize_t iommu_dma_protection_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        /*
         * Kernel DMA protection is a feature where Thunderbolt security
         * is handled natively using the IOMMU. It is enabled when the
         * IOMMU is enabled and the ACPI DMAR table has
         * DMAR_PLATFORM_OPT_IN set.
         */
        return sprintf(buf, "%d\n",
                       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb *tb = container_of(dev, struct tb, dev);
        const char *name = "unknown";

        if (tb->security_level < ARRAY_SIZE(tb_security_names))
                name = tb_security_names[tb->security_level];

        return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
        &dev_attr_boot_acl.attr,
        &dev_attr_deauthorization.attr,
        &dev_attr_iommu_dma_protection.attr,
        &dev_attr_security.attr,
        NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct tb *tb = container_of(dev, struct tb, dev);

        if (attr == &dev_attr_boot_acl.attr) {
                if (tb->nboot_acl &&
                    tb->cm_ops->get_boot_acl &&
                    tb->cm_ops->set_boot_acl)
                        return attr->mode;
                return 0;
        }

        return attr->mode;
}

static const struct attribute_group domain_attr_group = {
        .is_visible = domain_attr_is_visible,
        .attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
        &domain_attr_group,
        NULL,
};

struct bus_type tb_bus_type = {
        .name = "thunderbolt",
        .match = tb_service_match,
        .probe = tb_service_probe,
        .remove = tb_service_remove,
        .shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
        struct tb *tb = container_of(dev, struct tb, dev);

        tb_ctl_free(tb->ctl);
        destroy_workqueue(tb->wq);
        ida_simple_remove(&tb_domain_ida, tb->index);
        mutex_destroy(&tb->lock);
        kfree(tb);
}

struct device_type tb_domain_type = {
        .name = "thunderbolt_domain",
        .release = tb_domain_release,
};

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        struct tb *tb = data;

        if (!tb->cm_ops->handle_event) {
                tb_warn(tb, "domain does not have event handler\n");
                return true;
        }

        switch (type) {
        case TB_CFG_PKG_XDOMAIN_REQ:
        case TB_CFG_PKG_XDOMAIN_RESP:
                if (tb_is_xdomain_enabled())
                        return tb_xdomain_handle_request(tb, type, buf, size);
                break;

        default:
                tb->cm_ops->handle_event(tb, type, buf, size);
        }

        return true;
}

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @timeout_msec: Control channel timeout for non-raw messages
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
        struct tb *tb;

        /*
         * Make sure the structure sizes match what the hardware
         * expects because bit-fields are being used.
         */
        BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
        BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

        tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
        if (!tb)
                return NULL;

        tb->nhi = nhi;
        mutex_init(&tb->lock);

        tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
        if (tb->index < 0)
                goto err_free;

        tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
        if (!tb->wq)
                goto err_remove_ida;

        tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
        if (!tb->ctl)
                goto err_destroy_wq;

        tb->dev.parent = &nhi->pdev->dev;
        tb->dev.bus = &tb_bus_type;
        tb->dev.type = &tb_domain_type;
        tb->dev.groups = domain_attr_groups;
        dev_set_name(&tb->dev, "domain%d", tb->index);
        device_initialize(&tb->dev);

        return tb;

err_destroy_wq:
        destroy_workqueue(tb->wq);
err_remove_ida:
        ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
        kfree(tb);

        return NULL;
}
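
/*
 * A minimal sketch of the allocation pattern described in the kernel-doc
 * above, loosely following what the connection managers in this driver
 * do. The example_cm names and the timeout are hypothetical; struct
 * example_cm stands for the private data carved out by @privsize and
 * reached later with tb_priv():
 *
 *	struct tb *example_cm_probe(struct tb_nhi *nhi)
 *	{
 *		struct tb *tb;
 *
 *		tb = tb_domain_alloc(nhi, 100, sizeof(struct example_cm));
 *		if (!tb)
 *			return NULL;
 *
 *		tb->cm_ops = &example_cm_ops;
 *
 *		return tb;
 *	}
 *
 * If setup fails before the domain is added with tb_domain_add(), the
 * caller drops the reference with tb_domain_put().
 */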

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this has returned successfully. In order to remove and
 * release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
        int ret;

        if (WARN_ON(!tb->cm_ops))
                return -EINVAL;

        mutex_lock(&tb->lock);
        /*
         * tb_schedule_hotplug_handler may be called as soon as the config
         * channel is started. That's why we have to hold the lock here.
         */
        tb_ctl_start(tb->ctl);

        if (tb->cm_ops->driver_ready) {
                ret = tb->cm_ops->driver_ready(tb);
                if (ret)
                        goto err_ctl_stop;
        }

        tb_dbg(tb, "security level set to %s\n",
               tb_security_names[tb->security_level]);

        ret = device_add(&tb->dev);
        if (ret)
                goto err_ctl_stop;

        /* Start the domain */
        if (tb->cm_ops->start) {
                ret = tb->cm_ops->start(tb);
                if (ret)
                        goto err_domain_del;
        }

        /* This starts event processing */
        mutex_unlock(&tb->lock);

        device_init_wakeup(&tb->dev, true);

        pm_runtime_no_callbacks(&tb->dev);
        pm_runtime_set_active(&tb->dev);
        pm_runtime_enable(&tb->dev);
        pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&tb->dev);
        pm_runtime_use_autosuspend(&tb->dev);

        return 0;

err_domain_del:
        device_del(&tb->dev);
err_ctl_stop:
        tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        return ret;
}
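
/*
 * Continuing the sketch from tb_domain_alloc() above: the caller that
 * created the domain pairs tb_domain_add() with tb_domain_remove(), and
 * falls back to tb_domain_put() if adding fails. Roughly (the
 * example_nhi_start name is hypothetical):
 *
 *	int example_nhi_start(struct tb *tb)
 *	{
 *		int ret;
 *
 *		ret = tb_domain_add(tb);
 *		if (ret) {
 *			// Never added, drop our reference directly
 *			tb_domain_put(tb);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 *
 * On teardown the caller simply invokes tb_domain_remove(tb), which
 * stops the domain and releases it once the last reference is dropped.
 */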

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
        mutex_lock(&tb->lock);
        if (tb->cm_ops->stop)
                tb->cm_ops->stop(tb);
        /* Stop the domain control traffic */
        tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        flush_workqueue(tb->wq);
        device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
        int ret = 0;

        /*
         * The control channel interrupt is left enabled during suspend
         * and taking the lock here prevents any events happening before
         * we actually have stopped the domain and the control channel.
         */
        mutex_lock(&tb->lock);
        if (tb->cm_ops->suspend_noirq)
                ret = tb->cm_ops->suspend_noirq(tb);
        if (!ret)
                tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->resume_noirq)
                ret = tb->cm_ops->resume_noirq(tb);
        mutex_unlock(&tb->lock);

        return ret;
}

int tb_domain_suspend(struct tb *tb)
{
        return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        if (tb->cm_ops->freeze_noirq)
                ret = tb->cm_ops->freeze_noirq(tb);
        if (!ret)
                tb_ctl_stop(tb->ctl);
        mutex_unlock(&tb->lock);

        return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
        int ret = 0;

        mutex_lock(&tb->lock);
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->thaw_noirq)
                ret = tb->cm_ops->thaw_noirq(tb);
        mutex_unlock(&tb->lock);

        return ret;
}

void tb_domain_complete(struct tb *tb)
{
        if (tb->cm_ops->complete)
                tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
        if (tb->cm_ops->runtime_suspend) {
                int ret = tb->cm_ops->runtime_suspend(tb);
                if (ret)
                        return ret;
        }
        tb_ctl_stop(tb->ctl);
        return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
        tb_ctl_start(tb->ctl);
        if (tb->cm_ops->runtime_resume) {
                int ret = tb->cm_ops->runtime_resume(tb);
                if (ret)
                        return ret;
        }
        return 0;
}

/**
 * tb_domain_disapprove_switch() - Disapprove switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to disapprove
 *
 * This will disconnect the PCIe tunnel from the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
{
        if (!tb->cm_ops->disapprove_switch)
                return -EPERM;

        return tb->cm_ops->disapprove_switch(tb, sw);
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create a PCIe tunnel from
 * the parent to @sw.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;

        if (!tb->cm_ops->approve_switch)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        return tb->cm_ops->approve_switch(tb, sw);
}
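
/*
 * For context, this is the path taken when userspace authorizes a device
 * under the "user" security level, e.g. (a sketch; the device name "0-1"
 * is made up):
 *
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 *
 * The switch sysfs code turns that write into a call to
 * tb_domain_approve_switch() for the corresponding switch.
 */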

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds the
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
        struct tb_switch *parent_sw;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        ret = tb->cm_ops->add_switch_key(tb, sw);
        if (ret)
                return ret;

        return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches the HMAC we compute from the
 * challenge with the stored key, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
        u8 challenge[TB_SWITCH_KEY_SIZE];
        u8 response[TB_SWITCH_KEY_SIZE];
        u8 hmac[TB_SWITCH_KEY_SIZE];
        struct tb_switch *parent_sw;
        struct crypto_shash *tfm;
        struct shash_desc *shash;
        int ret;

        if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
                return -EPERM;

        /* The parent switch must be authorized before this one */
        parent_sw = tb_to_switch(sw->dev.parent);
        if (!parent_sw || !parent_sw->authorized)
                return -EINVAL;

        get_random_bytes(challenge, sizeof(challenge));
        ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
        if (ret)
                return ret;

        tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
        if (ret)
                goto err_free_tfm;

        shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
                        GFP_KERNEL);
        if (!shash) {
                ret = -ENOMEM;
                goto err_free_tfm;
        }

        shash->tfm = tfm;

        memset(hmac, 0, sizeof(hmac));
        ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
        if (ret)
                goto err_free_shash;

        /* The returned HMAC must match the one we calculated */
        if (memcmp(response, hmac, sizeof(hmac))) {
                ret = -EKEYREJECTED;
                goto err_free_shash;
        }

        crypto_free_shash(tfm);
        kfree(shash);

        return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
        kfree(shash);
err_free_tfm:
        crypto_free_shash(tfm);

        return ret;
}
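
/*
 * For context, a sketch of the "secure" security level flow that lands
 * here (the device name "0-1" is made up): on the first connect
 * userspace stores a key in the switch key attribute and writes 1 to
 * authorized, which goes through tb_domain_approve_switch_key() above.
 * On later connects it writes the same key and then 2:
 *
 *	# echo <32 bytes of hex> > /sys/bus/thunderbolt/devices/0-1/key
 *	# echo 2 > /sys/bus/thunderbolt/devices/0-1/authorized
 *
 * which triggers the challenge/response in this function.
 */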

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
        if (!tb->cm_ops->disconnect_pcie_paths)
                return -EPERM;

        return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                                    int transmit_path, int transmit_ring,
                                    int receive_path, int receive_ring)
{
        if (!tb->cm_ops->approve_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
                        transmit_ring, receive_path, receive_ring);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                                       int transmit_path, int transmit_ring,
                                       int receive_path, int receive_ring)
{
        if (!tb->cm_ops->disconnect_xdomain_paths)
                return -ENOTSUPP;

        return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
                        transmit_ring, receive_path, receive_ring);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;
        struct tb *tb = data;
        int ret = 0;

        xd = tb_to_xdomain(dev);
        if (xd && xd->tb == tb)
                ret = tb_xdomain_disable_all_paths(xd);

        return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
        int ret;

        ret = tb_domain_disconnect_pcie_paths(tb);
        if (ret)
                return ret;

        return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
        int ret;

        tb_test_init();

        tb_debugfs_init();
        ret = tb_xdomain_init();
        if (ret)
                goto err_debugfs;
        ret = bus_register(&tb_bus_type);
        if (ret)
                goto err_xdomain;

        return 0;

err_xdomain:
        tb_xdomain_exit();
err_debugfs:
        tb_debugfs_exit();
        tb_test_exit();

        return ret;
}

void tb_domain_exit(void)
{
        bus_unregister(&tb_bus_type);
        ida_destroy(&tb_domain_ida);
        tb_nvm_exit();
        tb_xdomain_exit();
        tb_debugfs_exit();
        tb_test_exit();
}