// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

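/*
 * The connection name takes the form "<hd_cport_id>/<interface_id>:<cport_id>",
 * e.g. "5/1:2"; static connections (no remote interface) render as
 * "<hd_cport_id>/0:0".
 */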
static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);

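/*
 * Typical bundle-driver use of the creation helpers above (a sketch only;
 * "my_handler" stands in for a driver's gb_request_handler_t):
 *
 *	connection = gb_connection_create(bundle, cport_id, my_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *
 *	ret = gb_connection_enable(connection);
 *	if (ret) {
 *		gb_connection_destroy(connection);
 *		return ret;
 *	}
 */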
static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
				       connection->flags);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_disable)
		return;

	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
			connection->name, ret);
	}
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

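/*
 * Quiesce the host cport, reserving enough peer buffer space for the final
 * cport-shutdown request (plus one extra message header when a mode switch
 * is pending).
 */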
static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	if (!hd->driver->cport_quiesce)
		return 0;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_clear)
		return 0;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
				       hd->svc->ap_intf_id,
				       connection->hd_cport_id,
				       intf->interface_id,
				       connection->intf_cport_id,
				       cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_control(connection))
		return 0;

	control = connection->intf->control;

	ret = gb_control_connected_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to connect cport: %d\n", ret);
		return ret;
	}

	return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}

static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED)
		goto out_unlock;

	ret = _gb_connection_enable(connection, true);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
	int ret = 0;

	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
		goto out_unlock;

	ret = _gb_connection_enable(connection, false);
	if (!ret)
		trace_gb_connection_enable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);

void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

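/*
 * Mode-switch helpers: prepare marks the connection so that a subsequent
 * disable defers SVC-connection and host-cport teardown, and complete
 * performs that deferred teardown once the mode switch has finished.
 */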
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}

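/*
 * Disable a connection: cancel outstanding operations, flush the host cport,
 * and run the disconnecting / shutdown-phase-1 / quiesce / shutdown-phase-2 /
 * disconnected sequence before tearing down the SVC connection and host cport
 * (that final teardown is deferred while a mode switch is pending).
 */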
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);
out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

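/*
 * Ask the host driver to enable/disable latency tagging on this connection's
 * cport; both calls are no-ops when the driver does not implement them.
 */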
void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_enable)
		return;

	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to enable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->latency_tag_disable)
		return;

	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to disable latency tag: %d\n",
			connection->name, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);