/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel DisplayPort (and USB)
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @privdata: Private connection manager specific data
 */
struct tb {
	struct device dev;
	struct mutex lock;
	struct tb_nhi *nhi;
	struct tb_ctl *ctl;
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	int index;
	enum tb_security_level security_level;
	unsigned long privdata[0];
};

extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT	2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
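
/*
 * For example, with TB_LINKS_PER_PHY_PORT == 2, links 1 and 2 map to
 * phy port 0 and links 3 and 4 map to phy port 1.
 */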

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * Users need to provide serialization themselves if needed.
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};

enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE	8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always %NUL terminated)
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key,
				     enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
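
/*
 * Illustrative sketch (not part of this API's documentation): build a
 * small property directory, walk its entries and register it under a
 * key. The key and property names here are hypothetical.
 *
 *	struct tb_property_dir *dir;
 *	struct tb_property *p;
 *
 *	dir = tb_property_create_dir(NULL);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_text(dir, "device", "Example device");
 *
 *	tb_property_for_each(dir, p)
 *		pr_info("property %s\n", p->key);
 *
 *	ret = tb_register_property_dir("example", dir);
 */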

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string with which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @is_unplugged: The XDomain is unplugged
 * @resume: The XDomain is being resumed
 * @transmit_path: HopID which the remote end expects us to transmit
 * @transmit_ring: Local ring (hop) where outgoing packets are pushed
 * @receive_path: HopID which we expect the remote end to transmit
 * @receive_ring: Local ring (hop) where incoming packets arrive
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote domain
 * @property_block_gen: Generation of @properties
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of times left to read properties
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *				changed notification
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain the remote domain is connected at (ICM only)
 *
 * This structure represents a connection across two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	bool is_unplugged;
	bool resume;
	u16 transmit_path;
	u16 transmit_ring;
	u16 receive_path;
	u16 receive_ring;
	struct ida service_ids;
	struct tb_property_dir *properties;
	u32 property_block_gen;
	struct delayed_work get_properties_work;
	int properties_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	u8 link;
	u8 depth;
};

int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}
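
/*
 * Illustrative sketch: look up a remote domain by UUID and drop the
 * reference when done. This assumes the lookup returns a referenced
 * object that is released with tb_xdomain_put().
 *
 *	struct tb_xdomain *xd;
 *
 *	xd = tb_xdomain_find_by_uuid_locked(tb, uuid);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */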

static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
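
/*
 * Illustrative sketch: issue an XDomain request and wait up to a
 * second for the reply. struct my_req/my_resp stand in for real,
 * protocol specific packets; only the packet types come from this
 * header.
 *
 *	struct my_req req = { ... };
 *	struct my_resp resp;
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &resp, sizeof(resp),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 */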

/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
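
/*
 * Illustrative sketch: hook a service into incoming XDomain messages.
 * my_uuid, my_callback and my_state are hypothetical; the callback
 * returns 1 once it has consumed a message directed to this service.
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_uuid,
 *		.callback = my_callback,
 *		.data = &my_state,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */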

/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 *
 * Each domain exposes a set of services it supports as a collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
	if (svc)
		get_device(&svc->dev);
	return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
	if (svc)
		put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
	if (tb_is_service(dev))
		return container_of(dev, struct tb_service, dev);
	return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
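
/*
 * Illustrative sketch: a minimal service driver bound with the
 * TB_SERVICE() matching macro. The "example" key, protocol ID and the
 * my_* callbacks are hypothetical.
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("example", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "example-service",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */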

static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x4,
	RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

/* Minimum buffer size for tb_ring_rx() */
#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask,
				 u16 eof_mask, void (*start_poll)(void *),
				 void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);
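
/*
 * Illustrative sketch: typical ring lifecycle. The hop value -1 is
 * assumed to mean "allocate any available hop"; the ring size and
 * flags are arbitrary examples.
 *
 *	struct tb_ring *ring;
 *
 *	ring = tb_ring_alloc_tx(nhi, -1, 64, RING_FLAG_FRAME);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 *	...
 *	tb_ring_stop(ring);
 *	tb_ring_free(ring);
 */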

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy have to be set. The buffer must
 * contain at least %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero
 * otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->size, @frame->eof and
 * @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero
 * otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}
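
/*
 * Illustrative sketch: map a buffer with the ring's DMA device before
 * handing the frame to tb_ring_tx(). buf and size are assumed to be
 * provided by the caller.
 *
 *	frame->buffer_phy = dma_map_single(tb_ring_dma_device(ring),
 *					   buf, size, DMA_TO_DEVICE);
 *	if (dma_mapping_error(tb_ring_dma_device(ring),
 *			      frame->buffer_phy))
 *		return -ENOMEM;
 */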

#endif /* THUNDERBOLT_H_ */