// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT     10
#define TB_CTL_RETRIES          4

/**
 * struct tb_ctl - Thunderbolt control channel
 */
struct tb_ctl {
        struct tb_nhi *nhi;
        struct tb_ring *tx;
        struct tb_ring *rx;

        struct dma_pool *frame_pool;
        struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
        struct mutex request_queue_lock;
        struct list_head request_queue;
        bool running;

        event_cb callback;
        void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
        dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
        dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
        dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
        dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
        dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
        struct tb_cfg_request *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;

        kref_init(&req->kref);

        return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_get(&req->kref);
        mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
        struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

        kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
        mutex_lock(&tb_cfg_request_lock);
        kref_put(&req->kref, tb_cfg_request_destroy);
        mutex_unlock(&tb_cfg_request_lock);
}
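
/*
 * Note on the lifetime rules (descriptive summary of the code in this
 * file, not new behavior): the submitter holds one reference from
 * tb_cfg_request_alloc() and drops it with tb_cfg_request_put() when done;
 * tb_cfg_request() takes an extra reference for the duration of the
 * transfer, and the RX path takes a short-lived one in
 * tb_cfg_request_find(). The request is freed only when the last
 * reference is dropped.
 */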

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
                                  struct tb_cfg_request *req)
{
        WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
        WARN_ON(req->ctl);

        mutex_lock(&ctl->request_queue_lock);
        if (!ctl->running) {
                mutex_unlock(&ctl->request_queue_lock);
                return -ENOTCONN;
        }
        req->ctl = ctl;
        list_add_tail(&req->list, &ctl->request_queue);
        set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        mutex_unlock(&ctl->request_queue_lock);
        return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
        struct tb_ctl *ctl = req->ctl;

        mutex_lock(&ctl->request_queue_lock);
        list_del(&req->list);
        clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
        if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                wake_up(&tb_cfg_request_cancel_queue);
        mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
        return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
        struct tb_cfg_request *req;
        bool found = false;

        mutex_lock(&pkg->ctl->request_queue_lock);
        list_for_each_entry(req, &pkg->ctl->request_queue, list) {
                tb_cfg_request_get(req);
                if (req->match(req, pkg)) {
                        found = true;
                        break;
                }
                tb_cfg_request_put(req);
        }
        mutex_unlock(&pkg->ctl->request_queue_lock);

        return found ? req : NULL;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
                        enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;

        /* check frame, TODO: frame flags */
        if (WARN(len != pkg->frame.size,
                 "wrong framesize (expected %#x, got %#x)\n",
                 len, pkg->frame.size))
                return -EIO;
        if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
                 type, pkg->frame.eof))
                return -EIO;
        if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
                 pkg->frame.sof))
                return -EIO;

        /* check header */
        if (WARN(header->unknown != 1 << 9,
                 "header->unknown is %#x\n", header->unknown))
                return -EIO;
        if (WARN(route != tb_cfg_get_route(header),
                 "wrong route (expected %llx, got %llx)",
                 route, tb_cfg_get_route(header)))
                return -EIO;
        return 0;
}

static int check_config_address(struct tb_cfg_address addr,
                                enum tb_cfg_space space, u32 offset,
                                u32 length)
{
        if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
                return -EIO;
        if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
                 space, addr.space))
                return -EIO;
        if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
                 offset, addr.offset))
                return -EIO;
        if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
                 length, addr.length))
                return -EIO;
        /*
         * We cannot check addr->port as it is set to the upstream port of the
         * sender.
         */
        return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
        struct cfg_error_pkg *pkg = response->buffer;
        struct tb_ctl *ctl = response->ctl;
        struct tb_cfg_result res = { 0 };
        res.response_route = tb_cfg_get_route(&pkg->header);
        res.response_port = 0;
        res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
                               tb_cfg_get_route(&pkg->header));
        if (res.err)
                return res;

        if (pkg->zero1)
                tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
        if (pkg->zero2)
                tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
        if (pkg->zero3)
                tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

        res.err = 1;
        res.tb_error = pkg->error;
        res.response_port = pkg->port;
        return res;

}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
                                         enum tb_cfg_pkg_type type, u64 route)
{
        struct tb_cfg_header *header = pkg->buffer;
        struct tb_cfg_result res = { 0 };

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return decode_error(pkg);

        res.response_port = 0; /* will be updated later for cfg_read/write */
        res.response_route = tb_cfg_get_route(header);
        res.err = check_header(pkg, len, type, route);
        return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
                               const struct tb_cfg_result *res)
{
        WARN_ON(res->err != 1);
        switch (res->tb_error) {
        case TB_CFG_ERROR_PORT_NOT_CONNECTED:
                /* Port is not connected. This can happen during surprise
                 * removal. Do not warn. */
                return;
        case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
                /*
                 * Invalid cfg_space/offset/length combination in
                 * cfg_read/cfg_write.
                 */
                tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
                           res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_NO_SUCH_PORT:
                /*
                 * - The route contains a non-existent port.
                 * - The route contains a non-PHY port (e.g. PCIe).
                 * - The port in cfg_read/cfg_write does not exist.
                 */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
                            res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_LOOP:
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
                            res->response_route, res->response_port);
                return;
        case TB_CFG_ERROR_LOCK:
                tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
                            res->response_route, res->response_port);
                return;
        default:
                /* 5,6,7,9 and 11 are also valid error codes */
                tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
                            res->response_route, res->response_port);
                return;
        }
}

static __be32 tb_crc(const void *data, size_t len)
{
        return cpu_to_be32(~__crc32c_le(~0, data, len));
}
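
/*
 * Note: tb_crc() computes CRC-32C (Castagnoli) over the packet payload.
 * The checksum is stored big-endian in the last dword of every control
 * frame (see tb_ctl_tx() below) and verified in tb_ctl_rx_callback()
 * before a received packet is handed to the rest of the driver.
 */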

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
        if (pkg) {
                dma_pool_free(pkg->ctl->frame_pool,
                              pkg->buffer, pkg->frame.buffer_phy);
                kfree(pkg);
        }
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
        struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
        if (!pkg)
                return NULL;
        pkg->ctl = ctl;
        pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
                                     &pkg->frame.buffer_phy);
        if (!pkg->buffer) {
                kfree(pkg);
                return NULL;
        }
        return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
        tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
                     enum tb_cfg_pkg_type type)
{
        int res;
        struct ctl_pkg *pkg;
        if (len % 4 != 0) { /* required for le->be conversion */
                tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
                return -EINVAL;
        }
        if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
                tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
                            len, TB_FRAME_SIZE - 4);
                return -EINVAL;
        }
        pkg = tb_ctl_pkg_alloc(ctl);
        if (!pkg)
                return -ENOMEM;
        pkg->frame.callback = tb_ctl_tx_callback;
        pkg->frame.size = len + 4;
        pkg->frame.sof = type;
        pkg->frame.eof = type;
        cpu_to_be32_array(pkg->buffer, data, len / 4);
        *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

        res = tb_ring_tx(ctl->tx, &pkg->frame);
        if (res) /* ring is stopped */
                tb_ctl_pkg_free(pkg);
        return res;
}
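
/*
 * Resulting TX frame layout (illustrative): the len bytes of payload are
 * byte-swapped dword by dword to big endian and the CRC-32C is appended,
 * so the frame transmitted on the ring is len + 4 bytes long:
 *
 *      [ be32 dword 0 ][ be32 dword 1 ] ... [ be32 dword len/4 - 1 ][ crc ]
 */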

/**
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
                                struct ctl_pkg *pkg, size_t size)
{
        return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
        tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
                                                * We ignore failures during stop.
                                                * All rx packets are referenced
                                                * from ctl->rx_packets, so we do
                                                * not lose them.
                                                */
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
        const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;

        if (pkg->frame.eof != TB_CFG_PKG_ERROR)
                return false;

        switch (error->error) {
        case TB_CFG_ERROR_LINK_ERROR:
        case TB_CFG_ERROR_HEC_ERROR_DETECTED:
        case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
                return true;

        default:
                return false;
        }
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
{
        struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
        struct tb_cfg_request *req;
        __be32 crc32;

        if (canceled)
                return; /*
                         * ring is stopped, packet is referenced from
                         * ctl->rx_packets.
                         */

        if (frame->size < 4 || frame->size % 4 != 0) {
                tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
                           frame->size);
                goto rx;
        }

        frame->size -= 4; /* remove checksum */
        crc32 = tb_crc(pkg->buffer, frame->size);
        be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

        switch (frame->eof) {
        case TB_CFG_PKG_READ:
        case TB_CFG_PKG_WRITE:
        case TB_CFG_PKG_ERROR:
        case TB_CFG_PKG_OVERRIDE:
        case TB_CFG_PKG_RESET:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                if (tb_async_error(pkg)) {
                        tb_ctl_handle_event(pkg->ctl, frame->eof,
                                            pkg, frame->size);
                        goto rx;
                }
                break;

        case TB_CFG_PKG_EVENT:
        case TB_CFG_PKG_XDOMAIN_RESP:
        case TB_CFG_PKG_XDOMAIN_REQ:
                if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
                        tb_ctl_err(pkg->ctl,
                                   "RX: checksum mismatch, dropping packet\n");
                        goto rx;
                }
                fallthrough;
        case TB_CFG_PKG_ICM_EVENT:
                if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
                        goto rx;
                break;

        default:
                break;
        }

        /*
         * The received packet will be processed only if there is an
         * active request and that the packet is what is expected. This
         * prevents packets such as replies coming after timeout has
         * triggered from messing with the active requests.
         */
        req = tb_cfg_request_find(pkg->ctl, pkg);
        if (req) {
                if (req->copy(req, pkg))
                        schedule_work(&req->work);
                tb_cfg_request_put(req);
        }

rx:
        tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
        struct tb_cfg_request *req = container_of(work, typeof(*req), work);

        if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
                req->callback(req->callback_data);

        tb_cfg_request_dequeue(req);
        tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
                   void (*callback)(void *), void *callback_data)
{
        int ret;

        req->flags = 0;
        req->callback = callback;
        req->callback_data = callback_data;
        INIT_WORK(&req->work, tb_cfg_request_work);
        INIT_LIST_HEAD(&req->list);

        tb_cfg_request_get(req);
        ret = tb_cfg_request_enqueue(ctl, req);
        if (ret)
                goto err_put;

        ret = tb_ctl_tx(ctl, req->request, req->request_size,
                        req->request_type);
        if (ret)
                goto err_dequeue;

        if (!req->response)
                schedule_work(&req->work);

        return 0;

err_dequeue:
        tb_cfg_request_dequeue(req);
err_put:
        tb_cfg_request_put(req);

        return ret;
}
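
/*
 * Example (illustrative sketch only, hypothetical callback and data): an
 * asynchronous caller fills in the match/copy hooks and the
 * request/response buffers, then gets notified through its callback once
 * the reply has been copied:
 *
 *      req->match = tb_cfg_match;
 *      req->copy = tb_cfg_copy;
 *      req->request = &request;
 *      req->request_size = sizeof(request);
 *      req->request_type = TB_CFG_PKG_READ;
 *      req->response = &reply;
 *      req->response_size = sizeof(reply);
 *      req->response_type = TB_CFG_PKG_READ;
 *      ret = tb_cfg_request(ctl, req, my_callback, my_data);
 *
 * tb_cfg_request_sync() below is exactly this pattern driven by a
 * completion.
 */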

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
        set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
        schedule_work(&req->work);
        wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
        req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
        complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If timeout
 * triggers the request is canceled before function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
                                         struct tb_cfg_request *req,
                                         int timeout_msec)
{
        unsigned long timeout = msecs_to_jiffies(timeout_msec);
        struct tb_cfg_result res = { 0 };
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
        if (ret) {
                res.err = ret;
                return res;
        }

        if (!wait_for_completion_timeout(&done, timeout))
                tb_cfg_request_cancel(req, -ETIMEDOUT);

        flush_work(&req->work);

        return req->result;
}
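
/*
 * Example (illustrative sketch): a synchronous caller such as
 * tb_cfg_reset() later in this file allocates a request, points it at its
 * request and reply buffers, waits for the result and then drops its
 * reference:
 *
 *      req = tb_cfg_request_alloc();
 *      if (!req)
 *              return -ENOMEM;
 *      ... set req->match, req->copy, buffers and packet types ...
 *      res = tb_cfg_request_sync(ctl, req, TB_CFG_DEFAULT_TIMEOUT);
 *      tb_cfg_request_put(req);
 *      if (res.err)
 *              ... handle transport or Thunderbolt error ...
 */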

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
        int i;
        struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
        if (!ctl)
                return NULL;
        ctl->nhi = nhi;
        ctl->callback = cb;
        ctl->callback_data = cb_data;

        mutex_init(&ctl->request_queue_lock);
        INIT_LIST_HEAD(&ctl->request_queue);
        ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
                                          TB_FRAME_SIZE, 4, 0);
        if (!ctl->frame_pool)
                goto err;

        ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
        if (!ctl->tx)
                goto err;

        ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
                                   0xffff, NULL, NULL);
        if (!ctl->rx)
                goto err;

        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
                ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
                if (!ctl->rx_packets[i])
                        goto err;
                ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
        }

        tb_ctl_dbg(ctl, "control channel created\n");
        return ctl;
err:
        tb_ctl_free(ctl);
        return NULL;
}
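
/*
 * Example (illustrative sketch, my_event_cb and my_data are hypothetical):
 * the usual life cycle of a control channel is
 *
 *      ctl = tb_ctl_alloc(nhi, my_event_cb, my_data);
 *      if (!ctl)
 *              return -ENOMEM;
 *      tb_ctl_start(ctl);
 *      ... tb_cfg_read()/tb_cfg_write()/tb_cfg_ack_plug() ...
 *      tb_ctl_stop(ctl);
 *      tb_ctl_free(ctl);
 */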

/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
        int i;

        if (!ctl)
                return;

        if (ctl->rx)
                tb_ring_free(ctl->rx);
        if (ctl->tx)
                tb_ring_free(ctl->tx);

        /* free RX packets */
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_pkg_free(ctl->rx_packets[i]);


        dma_pool_destroy(ctl->frame_pool);
        kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
        int i;
        tb_ctl_dbg(ctl, "control channel starting...\n");
        tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
        tb_ring_start(ctl->rx);
        for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
                tb_ctl_rx_submit(ctl->rx_packets[i]);

        ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
        mutex_lock(&ctl->request_queue_lock);
        ctl->running = false;
        mutex_unlock(&ctl->request_queue_lock);

        tb_ring_stop(ctl->rx);
        tb_ring_stop(ctl->tx);

        if (!list_empty(&ctl->request_queue))
                tb_ctl_WARN(ctl, "dangling request in request_queue\n");
        INIT_LIST_HEAD(&ctl->request_queue);
        tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as response for hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
        struct cfg_error_pkg pkg = {
                .header = tb_cfg_make_header(route),
                .port = port,
                .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
                .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
                             : TB_CFG_ERROR_PG_HOT_PLUG,
        };
        tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
                   unplug ? "un" : "", route, port);
        return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
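
/*
 * Example (illustrative sketch, assuming the event payload is a
 * struct cfg_event_pkg as declared in ctl.h): an event callback registered
 * via tb_ctl_alloc() might ack a hot plug notification like this:
 *
 *      const struct cfg_event_pkg *ev = buf;
 *      u64 route = tb_cfg_get_route(&ev->header);
 *
 *      tb_cfg_ack_plug(ctl, route, ev->port, ev->unplug);
 */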

static bool tb_cfg_match(const struct tb_cfg_request *req,
                         const struct ctl_pkg *pkg)
{
        u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
                return true;

        if (pkg->frame.eof != req->response_type)
                return false;
        if (route != tb_cfg_get_route(req->request))
                return false;
        if (pkg->frame.size != req->response_size)
                return false;

        if (pkg->frame.eof == TB_CFG_PKG_READ ||
            pkg->frame.eof == TB_CFG_PKG_WRITE) {
                const struct cfg_read_pkg *req_hdr = req->request;
                const struct cfg_read_pkg *res_hdr = pkg->buffer;

                if (req_hdr->addr.seq != res_hdr->addr.seq)
                        return false;
        }

        return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
        struct tb_cfg_result res;

        /* Now make sure it is in expected format */
        res = parse_header(pkg, req->response_size, req->response_type,
                           tb_cfg_get_route(req->request));
        if (!res.err)
                memcpy(req->response, pkg->buffer, req->response_size);

        req->result = res;

        /* Always complete when first response is received */
        return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
                                  int timeout_msec)
{
        struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
        struct tb_cfg_result res = { 0 };
        struct tb_cfg_header reply;
        struct tb_cfg_request *req;

        req = tb_cfg_request_alloc();
        if (!req) {
                res.err = -ENOMEM;
                return res;
        }

        req->match = tb_cfg_match;
        req->copy = tb_cfg_copy;
        req->request = &request;
        req->request_size = sizeof(request);
        req->request_type = TB_CFG_PKG_RESET;
        req->response = &reply;
        req->response_size = sizeof(reply);
        req->response_type = TB_CFG_PKG_RESET;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        return res;
}
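
/*
 * Example (illustrative sketch): per the comment above, a caller of
 * tb_cfg_reset() should treat a timeout as "the switch reset but could not
 * reply" and reconfigure it:
 *
 *      res = tb_cfg_reset(ctl, route, TB_CFG_DEFAULT_TIMEOUT);
 *      if (res.err == -ETIMEDOUT)
 *              ... switch likely reset without replying, reconfigure ...
 *      else if (res.err)
 *              ... genuine failure ...
 */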

/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_read_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_write_pkg reply;
        int retries = 0;

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = sizeof(request);
                req->request_type = TB_CFG_PKG_READ;
                req->response = &reply;
                req->response_size = 12 + 4 * length;
                req->response_type = TB_CFG_PKG_READ;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        if (!res.err)
                memcpy(buffer, &reply.data, 4 * length);
        return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
                u64 route, u32 port, enum tb_cfg_space space,
                u32 offset, u32 length, int timeout_msec)
{
        struct tb_cfg_result res = { 0 };
        struct cfg_write_pkg request = {
                .header = tb_cfg_make_header(route),
                .addr = {
                        .port = port,
                        .space = space,
                        .offset = offset,
                        .length = length,
                },
        };
        struct cfg_read_pkg reply;
        int retries = 0;

        memcpy(&request.data, buffer, length * 4);

        while (retries < TB_CTL_RETRIES) {
                struct tb_cfg_request *req;

                req = tb_cfg_request_alloc();
                if (!req) {
                        res.err = -ENOMEM;
                        return res;
                }

                request.addr.seq = retries++;

                req->match = tb_cfg_match;
                req->copy = tb_cfg_copy;
                req->request = &request;
                req->request_size = 12 + 4 * length;
                req->request_type = TB_CFG_PKG_WRITE;
                req->response = &reply;
                req->response_size = sizeof(reply);
                req->response_type = TB_CFG_PKG_WRITE;

                res = tb_cfg_request_sync(ctl, req, timeout_msec);

                tb_cfg_request_put(req);

                if (res.err != -ETIMEDOUT)
                        break;

                /* Wait a bit (arbitrary time) until we send a retry */
                usleep_range(10, 100);
        }

        if (res.err)
                return res;

        res.response_port = reply.addr.port;
        res.err = check_config_address(reply.addr, space, offset, length);
        return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
                            const struct tb_cfg_result *res)
{
        /*
         * For unimplemented ports access to port config space may return
         * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
         * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
         * that the caller can mark the port as disabled.
         */
        if (space == TB_CFG_PORT &&
            res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
                return -ENODEV;

        tb_cfg_print_error(ctl, res);

        if (res->tb_error == TB_CFG_ERROR_LOCK)
                return -EACCES;
        else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
                return -ENOTCONN;

        return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
                enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
                        space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                return tb_cfg_get_error(ctl, space, &res);

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
                            route, space, offset);
                break;

        default:
                WARN(1, "tb_cfg_read: %d\n", res.err);
                break;
        }
        return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
                 enum tb_cfg_space space, u32 offset, u32 length)
{
        struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
                        space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
        switch (res.err) {
        case 0:
                /* Success */
                break;

        case 1:
                /* Thunderbolt error, tb_error holds the actual number */
                return tb_cfg_get_error(ctl, space, &res);

        case -ETIMEDOUT:
                tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
                            route, space, offset);
                break;

        default:
                WARN(1, "tb_cfg_write: %d\n", res.err);
                break;
        }
        return res.err;
}
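
/*
 * Example (illustrative sketch, offset and some_bit are hypothetical):
 * offset and length are in dwords, so reading and writing back a single
 * 32-bit register of a switch config space looks like
 *
 *      u32 val;
 *      int ret;
 *
 *      ret = tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, offset, 1);
 *      if (ret)
 *              return ret;
 *      val |= some_bit;
 *      ret = tb_cfg_write(ctl, &val, route, 0, TB_CFG_SWITCH, offset, 1);
 */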

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
        u32 dummy;
        struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
                                                   TB_CFG_SWITCH, 0, 1,
                                                   TB_CFG_DEFAULT_TIMEOUT);
        if (res.err == 1)
                return -EIO;
        if (res.err)
                return res.err;
        return res.response_port;
}
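
/*
 * Example (illustrative sketch): the return value is either a negative
 * error code or the upstream port number, so callers typically do
 *
 *      int port = tb_cfg_get_upstream_port(ctl, route);
 *
 *      if (port < 0)
 *              return port;
 *      ... port is the upstream port of the switch at route ...
 */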