 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4
 * struct tb_ctl - thunderbolt control channel
	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
 * tb_cfg_request_alloc() - Allocates a new config request
 * This is a refcounted object, so call tb_cfg_request_put() when you are
 * done with it.
struct tb_cfg_request *tb_cfg_request_alloc(void)
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);

	kref_init(&req->kref);
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
void tb_cfg_request_get(struct tb_cfg_request *req)
	mutex_lock(&tb_cfg_request_lock);
	mutex_unlock(&tb_cfg_request_lock);
static void tb_cfg_request_destroy(struct kref *kref)
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
void tb_cfg_request_put(struct tb_cfg_request *req)
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
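/*
 * Illustrative sketch (not part of the driver): a caller typically pairs
 * tb_cfg_request_alloc() with a final tb_cfg_request_put(), and takes an
 * extra reference with tb_cfg_request_get() only while the request is
 * shared with another context such as the completion worker:
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	if (!req)
 *		return -ENOMEM;
 *	tb_cfg_request_get(req);	// extra reference while shared
 *	...
 *	tb_cfg_request_put(req);	// drop the extra reference
 *	tb_cfg_request_put(req);	// final put releases the request
 */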
static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));

	mutex_lock(&ctl->request_queue_lock);
	mutex_unlock(&ctl->request_queue_lock);

	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
	struct tb_cfg_request *req;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
		tb_cfg_request_put(req);
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)",
		 route, tb_cfg_get_route(header)))
static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))

	 * We cannot check addr->port as it is set to the upstream port of the
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };
	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));

	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);

	res.tb_error = pkg->error;
	res.response_port = pkg->port;
static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
			res->response_route, res->response_port);
	case TB_CFG_ERROR_NO_SUCH_PORT:
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
static void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
	for (i = 0; i < len; i++)
		dst[i] = cpu_to_be32(src[i]);

static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
	for (i = 0; i < len; i++)
		dst[i] = be32_to_cpu(src[i]);
static __be32 tb_crc(const void *data, size_t len)
	return cpu_to_be32(~__crc32c_le(~0, data, len));
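/*
 * Illustrative sketch (not part of the driver): this mirrors how tb_ctl_tx()
 * below builds a frame - the payload is converted to big endian dword by
 * dword and the CRC32C of the converted payload is appended directly after
 * it, so the frame occupies len + 4 bytes (buf, data and len are placeholder
 * names):
 *
 *	cpu_to_be32_array(buf, data, len / 4);
 *	*(__be32 *)(buf + len) = tb_crc(buf, len);
 */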
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
	dma_pool_free(pkg->ctl->frame_pool,
		      pkg->buffer, pkg->frame.buffer_phy);
static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	tb_ctl_pkg_free(pkg);
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
	pkg = tb_ctl_pkg_alloc(ctl);
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
static void tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
	ctl->callback(ctl->callback_data, type, pkg->buffer, size);
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
	ring_rx(pkg->ctl->rx, &pkg->frame); /*
					     * We ignore failures during stop.
					     * All rx packets are referenced
					     * from ctl->rx_packets, so we do
static int tb_async_error(const struct ctl_pkg *pkg)
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;

	 * ring is stopped, packet is referenced from
	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
				"RX: checksum mismatch, dropping packet\n");
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,

	case TB_CFG_PKG_EVENT:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
				"RX: checksum mismatch, dropping packet\n");
	case TB_CFG_PKG_ICM_EVENT:
		tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	req = tb_cfg_request_find(pkg->ctl, pkg);
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);

	tb_ctl_rx_submit(pkg);
static void tb_cfg_request_work(struct work_struct *work)
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
 * tb_cfg_request() - Start control request without waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
		schedule_work(&req->work);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
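/*
 * Illustrative sketch (not part of the driver): tb_cfg_request() only queues
 * and transmits the request; completion is signalled through the supplied
 * callback. A caller that wants to block can hand in a struct completion,
 * which is what tb_cfg_request_sync() below does (my_complete() is a
 * hypothetical callback that just calls complete() on its argument):
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	ret = tb_cfg_request(ctl, req, my_complete, &done);
 *	if (!ret)
 *		wait_for_completion(&done);
 */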
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
static void tb_cfg_request_complete(void *data)
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If timeout
 * triggers the request is canceled before function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);
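/*
 * Illustrative sketch (not part of the driver, modelled on tb_cfg_reset()
 * below): a synchronous caller allocates a request, points it at its request
 * and response buffers and lets tb_cfg_request_sync() handle queuing,
 * timeout and cancellation:
 *
 *	req = tb_cfg_request_alloc();
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_RESET;
 *	req->response = &reply;
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_RESET;
 *	res = tb_cfg_request_sync(ctl, req, timeout_msec);
 *	tb_cfg_request_put(req);
 */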
/* public interface, alloc/start/stop/free */

 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)

	ctl->tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);

	ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;

	tb_ctl_info(ctl, "control channel created\n");
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
void tb_ctl_free(struct tb_ctl *ctl)
	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
 * tb_ctl_start() - start/resume the control channel
void tb_ctl_start(struct tb_ctl *ctl)
	tb_ctl_info(ctl, "control channel starting...\n");
	ring_start(ctl->tx); /* is used to ack hotplug packets, start first */

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
void tb_ctl_stop(struct tb_ctl *ctl)
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_info(ctl, "control channel stopped\n");
/* public interface, commands */

 * tb_cfg_error() - send error packet
 *
 * Return: Returns 0 on success or an error code on failure.
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
		 enum tb_cfg_error error)
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
	tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)

	if (pkg->frame.eof != req->response_type)
	if (route != tb_cfg_get_route(req->request))
	if (pkg->frame.size != req->response_size)

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));

	memcpy(req->response, pkg->buffer, req->response_size);

	/* Always complete when first response is received */
782 * If the switch at route is incorrectly configured then we will not receive a
783 * reply (even though the switch will reset). The caller should check for
784 * -ETIMEDOUT and attempt to reconfigure the switch.
786 struct tb_cfg_result
tb_cfg_reset(struct tb_ctl
*ctl
, u64 route
,
789 struct cfg_reset_pkg request
= { .header
= tb_cfg_make_header(route
) };
790 struct tb_cfg_result res
= { 0 };
791 struct tb_cfg_header reply
;
792 struct tb_cfg_request
*req
;
794 req
= tb_cfg_request_alloc();
800 req
->match
= tb_cfg_match
;
801 req
->copy
= tb_cfg_copy
;
802 req
->request
= &request
;
803 req
->request_size
= sizeof(request
);
804 req
->request_type
= TB_CFG_PKG_RESET
;
805 req
->response
= &reply
;
806 req
->response_size
= sizeof(reply
);
807 req
->response_type
= sizeof(TB_CFG_PKG_RESET
);
809 res
= tb_cfg_request_sync(ctl
, req
, timeout_msec
);
811 tb_cfg_request_put(req
);
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
				     u64 route, u32 port, enum tb_cfg_space space,
				     u32 offset, u32 length, int timeout_msec)
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
	struct cfg_write_pkg reply;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);

	memcpy(buffer, &reply.data, 4 * length);
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
				      u64 route, u32 port, enum tb_cfg_space space,
				      u32 offset, u32 length, int timeout_msec)
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
	struct cfg_read_pkg reply;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

		/* Thunderbolt error, tb_error holds the actual number */
		tb_cfg_print_error(ctl, &res);

		tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",

		WARN(1, "tb_cfg_read: %d\n", res.err);
int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);

		/* Thunderbolt error, tb_error holds the actual number */
		tb_cfg_print_error(ctl, &res);

		tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",

		WARN(1, "tb_cfg_write: %d\n", res.err);
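/*
 * Illustrative sketch (not part of the driver): reading and modifying a
 * single dword of a port's config space. Offset and length are in dwords;
 * the route, port and offset values below are made up:
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = tb_cfg_read(ctl, &val, route, 1, TB_CFG_PORT, 4, 1);
 *	if (ret)
 *		return ret;
 *	val |= BIT(0);
 *	ret = tb_cfg_write(ctl, &val, route, 1, TB_CFG_PORT, 4, 1);
 */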
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);

	return res.response_port;