/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

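/*
 * Example (editor's sketch, not part of the original file): a service
 * driver binds to this bus by declaring a table of tb_service_id
 * entries that the match callback above evaluates. All example_* names
 * below are hypothetical; TB_SERVICE() fills in the protocol key and
 * protocol ID match flags.
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *		.id_table = example_ids,
 *	};
 *
 * The driver would then be registered with
 * tb_register_service_driver(&example_driver).
 */
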
static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					&uuids[i]);

		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
				i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl * ",".
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

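/*
 * Editor's note (illustrative values, not from the original file): the
 * attribute accepts a comma-separated list of tb->nboot_acl slots, so
 * with tb->nboot_acl == 2 userspace could preload one UUID and leave
 * the second slot cleared with something like:
 *
 *	# echo "0886dc11-cb2a-4d84-a1b4-e9f0e14ee0a6," > \
 *		/sys/bus/thunderbolt/devices/domain0/boot_acl
 *
 * Empty list entries are skipped by the parser above and stay as null
 * UUIDs, which clears the corresponding ACL slot.
 */
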
static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security is
	 * handled natively using IOMMU. It is enabled when IOMMU is
	 * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

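/*
 * Editor's note (illustrative): userspace tooling such as bolt reads
 * this attribute to decide how strictly devices need to be authorized,
 * e.g.:
 *
 *	$ cat /sys/bus/thunderbolt/devices/domain0/iommu_dma_protection
 *	1
 */
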
static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware
	 * expects, because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

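/*
 * Example (editor's sketch, hypothetical connection manager): the
 * intended calling sequence is to allocate the domain, fill in cm_ops
 * and then register it with tb_domain_add(). The example_* names are
 * assumptions for illustration only.
 *
 *	static struct tb *example_cm_probe(struct tb_nhi *nhi)
 *	{
 *		struct tb *tb;
 *
 *		tb = tb_domain_alloc(nhi, sizeof(struct example_cm));
 *		if (!tb)
 *			return NULL;
 *
 *		tb->cm_ops = &example_cm_ops;
 *
 *		if (tb_domain_add(tb)) {
 *			tb_domain_put(tb);
 *			return NULL;
 *		}
 *
 *		return tb;
 *	}
 */
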
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		return tb_xdomain_handle_request(tb, type, buf, size);

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to remove
 * and release the domain after this function has been called, call
 * tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);
		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means. In
 * case of success the connection manager will create tunnels for all
 * supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * key to the switch NVM using connection manager specific means. If
 * adding the key is successful, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

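/*
 * Editor's note: these approval helpers back a switch's "authorized"
 * sysfs attribute. Writing 1 ends up in tb_domain_approve_switch() or
 * tb_domain_approve_switch_key(), and writing 2 in
 * tb_domain_challenge_switch_key() below, depending on the domain
 * security level and whether a key is already stored.
 */
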
/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds to
 * this and if the response matches our random challenge, the switch is
 * approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;
	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}

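/*
 * Editor's note: the exchange above succeeds only when the switch
 * proves knowledge of the key previously written to its NVM, i.e. it
 * must return
 *
 *	response = HMAC-SHA256(sw->key, challenge)
 *
 * which is what the function recomputes locally into hmac[] before
 * comparing it against the switch's response with memcmp().
 */
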
/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	ret = tb_xdomain_init();
	if (ret)
		return ret;
	ret = bus_register(&tb_bus_type);
	if (ret)
		tb_xdomain_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_xdomain_exit();
}