/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"


#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT     1000


static void gb_connection_kref_release(struct kref *kref);

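/*
 * gb_connections_lock protects the per-host-device and per-bundle
 * connection lists; gb_connection_mutex serialises connection create
 * and destroy (see _gb_connection_create() below).
 */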
static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);


/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
        struct gb_host_device *hd = intf->hd;
        struct gb_connection *connection;

        list_for_each_entry(connection, &hd->connections, hd_links) {
                if (connection->intf == intf &&
                                connection->intf_cport_id == cport_id)
                        return true;
        }

        return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
        kref_get(&connection->kref);

        trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
        trace_gb_connection_put(connection);

        kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
        struct gb_connection *connection;
        unsigned long flags;

        spin_lock_irqsave(&gb_connections_lock, flags);
        list_for_each_entry(connection, &hd->connections, hd_links)
                if (connection->hd_cport_id == cport_id) {
                        gb_connection_get(connection);
                        goto found;
                }
        connection = NULL;
found:
        spin_unlock_irqrestore(&gb_connections_lock, flags);

        return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
                       u8 *data, size_t length)
{
        struct gb_connection *connection;

        trace_gb_hd_in(hd);

        connection = gb_connection_hd_find(hd, cport_id);
        if (!connection) {
                dev_err(&hd->dev,
                        "nonexistent connection (%zu bytes dropped)\n", length);
                return;
        }
        gb_connection_recv(connection, data, length);
        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
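
/*
 * Illustrative sketch only: a host-controller driver typically calls
 * greybus_data_rcvd() from its receive completion handler, after
 * mapping the completed transfer back to a cport. The example_*
 * helpers below are hypothetical; only greybus_data_rcvd() and its
 * signature come from this file.
 *
 *	static void example_cport_in_callback(struct urb *urb)
 *	{
 *		struct gb_host_device *hd = example_urb_to_hd(urb);
 *		u16 cport_id =
 *			example_unpack_cport_id(urb->transfer_buffer);
 *
 *		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
 *				  urb->actual_length);
 *	}
 */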

static void gb_connection_kref_release(struct kref *kref)
{
        struct gb_connection *connection;

        connection = container_of(kref, struct gb_connection, kref);

        trace_gb_connection_release(connection);

        kfree(connection);
}

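/*
 * The connection name encodes "<hd cport>/<interface>:<cport>", for
 * example "4/1:2" for host cport 4 connected to cport 2 of interface 1.
 * Static connections use 0 for the interface and cport fields.
 */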
static void gb_connection_init_name(struct gb_connection *connection)
{
        u16 hd_cport_id = connection->hd_cport_id;
        u16 cport_id = 0;
        u8 intf_id = 0;

        if (connection->intf) {
                intf_id = connection->intf->interface_id;
                cport_id = connection->intf_cport_id;
        }

        snprintf(connection->name, sizeof(connection->name),
                 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd: host device of the connection
 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
 * @intf: remote interface, or NULL for static connections
 * @bundle: remote-interface bundle (may be NULL)
 * @cport_id: remote-interface cport id, or 0 for static connections
 * @handler: request handler (may be NULL)
 * @flags: connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
                      struct gb_interface *intf,
                      struct gb_bundle *bundle, int cport_id,
                      gb_request_handler_t handler,
                      unsigned long flags)
{
        struct gb_connection *connection;
        int ret;

        mutex_lock(&gb_connection_mutex);

        if (intf && gb_connection_cport_in_use(intf, cport_id)) {
                dev_err(&intf->dev, "cport %u already in use\n", cport_id);
                ret = -EBUSY;
                goto err_unlock;
        }

        ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
                goto err_unlock;
        }
        hd_cport_id = ret;

        connection = kzalloc(sizeof(*connection), GFP_KERNEL);
        if (!connection) {
                ret = -ENOMEM;
                goto err_hd_cport_release;
        }

        connection->hd_cport_id = hd_cport_id;
        connection->intf_cport_id = cport_id;
        connection->hd = hd;
        connection->intf = intf;
        connection->bundle = bundle;
        connection->handler = handler;
        connection->flags = flags;
        if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
                connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
        connection->state = GB_CONNECTION_STATE_DISABLED;

        atomic_set(&connection->op_cycle, 0);
        mutex_init(&connection->mutex);
        spin_lock_init(&connection->lock);
        INIT_LIST_HEAD(&connection->operations);

        connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
                                         dev_name(&hd->dev), hd_cport_id);
        if (!connection->wq) {
                ret = -ENOMEM;
                goto err_free_connection;
        }

        kref_init(&connection->kref);

        gb_connection_init_name(connection);

        spin_lock_irq(&gb_connections_lock);
        list_add(&connection->hd_links, &hd->connections);

        if (bundle)
                list_add(&connection->bundle_links, &bundle->connections);
        else
                INIT_LIST_HEAD(&connection->bundle_links);

        spin_unlock_irq(&gb_connections_lock);

        mutex_unlock(&gb_connection_mutex);

        trace_gb_connection_create(connection);

        return connection;

err_free_connection:
        kfree(connection);
err_hd_cport_release:
        gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
        mutex_unlock(&gb_connection_mutex);

        return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
                            gb_request_handler_t handler)
{
        return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
                                     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
        return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
                                     GB_CONNECTION_FLAG_CONTROL |
                                     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
                     gb_request_handler_t handler)
{
        struct gb_interface *intf = bundle->intf;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
                           gb_request_handler_t handler,
                           unsigned long flags)
{
        struct gb_interface *intf = bundle->intf;

        if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
                flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

        return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
                                     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
                               unsigned long flags)
{
        flags |= GB_CONNECTION_FLAG_OFFLOADED;

        return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
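
/*
 * Typical bundle-driver usage, as an illustrative sketch only: the
 * example_* names are made up; the gb_* calls are the exported API
 * from this file, and gb_connection_create() returns an ERR_PTR on
 * failure as documented above.
 *
 *	static int example_probe(struct gb_bundle *bundle,
 *				 const struct greybus_bundle_id *id)
 *	{
 *		u16 cport_id = le16_to_cpu(bundle->cport_desc[0].id);
 *		struct gb_connection *connection;
 *		int ret;
 *
 *		connection = gb_connection_create(bundle, cport_id,
 *						  example_request_handler);
 *		if (IS_ERR(connection))
 *			return PTR_ERR(connection);
 *
 *		ret = gb_connection_enable(connection);
 *		if (ret) {
 *			gb_connection_destroy(connection);
 *			return ret;
 *		}
 *		...
 *	}
 *
 * with gb_connection_disable() and gb_connection_destroy() called in
 * the matching disconnect path.
 */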

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_enable)
                return 0;

        ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
                                       connection->flags);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void gb_connection_hd_cport_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_disable)
                return;

        ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
                        connection->name, ret);
        }
}

static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_connected)
                return 0;

        ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_flush)
                return 0;

        ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        size_t peer_space;
        int ret;

        if (!hd->driver->cport_quiesce)
                return 0;

        peer_space = sizeof(struct gb_operation_msg_hdr) +
                     sizeof(struct gb_cport_shutdown_request);

        if (connection->mode_switch)
                peer_space += sizeof(struct gb_operation_msg_hdr);

        ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
                                        peer_space,
                                        GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->cport_clear)
                return 0;

        ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 cport_flags;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        intf = connection->intf;

        /*
         * Enable either E2EFC or CSD, unless no flow control is requested.
         */
        cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
        if (gb_connection_flow_control_disabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
        } else if (gb_connection_e2efc_enabled(connection)) {
                cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
                               GB_SVC_CPORT_FLAG_E2EFC;
        }

        ret = gb_svc_connection_create(hd->svc,
                                       hd->svc->ap_intf_id,
                                       connection->hd_cport_id,
                                       intf->interface_id,
                                       connection->intf_cport_id,
                                       cport_flags);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to create svc connection: %d\n",
                        connection->name, ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
        if (gb_connection_is_static(connection))
                return;

        gb_svc_connection_destroy(connection->hd->svc,
                                  connection->hd->svc->ap_intf_id,
                                  connection->hd_cport_id,
                                  connection->intf->interface_id,
                                  connection->intf_cport_id);
}

/* Inform Interface about active CPorts */
static int gb_connection_control_connected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_control(connection))
                return 0;

        control = connection->intf->control;

        ret = gb_control_connected_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->bundle->dev,
                        "failed to connect cport: %d\n", ret);
                return ret;
        }

        return 0;
}

static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        ret = gb_control_disconnecting_operation(control, cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to send disconnecting: %d\n",
                        connection->name, ret);
        }
}

static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
        struct gb_control *control;
        u16 cport_id = connection->intf_cport_id;
        int ret;

        if (gb_connection_is_static(connection))
                return;

        control = connection->intf->control;

        if (gb_connection_is_control(connection)) {
                if (connection->mode_switch) {
                        ret = gb_control_mode_switch_operation(control);
                        if (ret) {
                                /*
                                 * Allow mode switch to time out waiting for
                                 * mailbox event.
                                 */
                                return;
                        }
                }

                return;
        }

        ret = gb_control_disconnected_operation(control, cport_id);
        if (ret) {
                dev_warn(&connection->bundle->dev,
                         "failed to disconnect cport: %d\n", ret);
        }
}

static int gb_connection_shutdown_operation(struct gb_connection *connection,
                                            u8 phase)
{
        struct gb_cport_shutdown_request *req;
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create_core(connection,
                                             GB_REQUEST_TYPE_CPORT_SHUTDOWN,
                                             sizeof(*req), 0, 0,
                                             GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        req = operation->request->payload;
        req->phase = phase;

        ret = gb_operation_request_send_sync(operation);

        gb_operation_put(operation);

        return ret;
}

static int gb_connection_cport_shutdown(struct gb_connection *connection,
                                        u8 phase)
{
        struct gb_host_device *hd = connection->hd;
        const struct gb_hd_driver *drv = hd->driver;
        int ret;

        if (gb_connection_is_static(connection))
                return 0;

        if (gb_connection_is_offloaded(connection)) {
                if (!drv->cport_shutdown)
                        return 0;

                ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
                                          GB_OPERATION_TIMEOUT_DEFAULT);
        } else {
                ret = gb_connection_shutdown_operation(connection, phase);
        }

        if (ret) {
                dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
                        connection->name, phase, ret);
                return ret;
        }

        return 0;
}

static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
        return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
        return gb_connection_cport_shutdown(connection, 2);
}
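
/*
 * Connection tear-down (see the _gb_connection_enable() error path and
 * gb_connection_disable() below) proceeds as: tell the remote end the
 * connection is going down, send a phase-1 cport shutdown, quiesce the
 * host cport so in-flight messages can drain, send a phase-2 shutdown,
 * and finally report the cport as disconnected.
 */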

/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
                                            int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;

        while (!list_empty(&connection->operations)) {
                operation = list_last_entry(&connection->operations,
                                            struct gb_operation, links);
                gb_operation_get(operation);
                spin_unlock_irq(&connection->lock);

                if (gb_operation_is_incoming(operation))
                        gb_operation_cancel_incoming(operation, errno);
                else
                        gb_operation_cancel(operation, errno);

                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
                                        int errno)
        __must_hold(&connection->lock)
{
        struct gb_operation *operation;
        bool incoming;

        while (!list_empty(&connection->operations)) {
                incoming = false;
                list_for_each_entry(operation, &connection->operations,
                                    links) {
                        if (gb_operation_is_incoming(operation)) {
                                gb_operation_get(operation);
                                incoming = true;
                                break;
                        }
                }

                if (!incoming)
                        break;

                spin_unlock_irq(&connection->lock);

                /* FIXME: flush, not cancel? */
                gb_operation_cancel_incoming(operation, errno);
                gb_operation_put(operation);

                spin_lock_irq(&connection->lock);
        }
}

/*
 * _gb_connection_enable() - enable a connection
 * @connection: connection to enable
 * @rx: whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
        int ret;

        /* Handle ENABLED_TX -> ENABLED transitions. */
        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
                if (!(connection->handler && rx))
                        return 0;

                spin_lock_irq(&connection->lock);
                connection->state = GB_CONNECTION_STATE_ENABLED;
                spin_unlock_irq(&connection->lock);

                return 0;
        }

        ret = gb_connection_hd_cport_enable(connection);
        if (ret)
                return ret;

        ret = gb_connection_svc_connection_create(connection);
        if (ret)
                goto err_hd_cport_clear;

        ret = gb_connection_hd_cport_connected(connection);
        if (ret)
                goto err_svc_connection_destroy;

        spin_lock_irq(&connection->lock);
        if (connection->handler && rx)
                connection->state = GB_CONNECTION_STATE_ENABLED;
        else
                connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        spin_unlock_irq(&connection->lock);

        ret = gb_connection_control_connected(connection);
        if (ret)
                goto err_control_disconnecting;

        return 0;

err_control_disconnecting:
        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        /* Transmit queue should already be empty. */
        gb_connection_hd_cport_flush(connection);

        gb_connection_control_disconnecting(connection);
        gb_connection_cport_shutdown_phase_1(connection);
        gb_connection_hd_cport_quiesce(connection);
        gb_connection_cport_shutdown_phase_2(connection);
        gb_connection_control_disconnected(connection);
        connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
        gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
        gb_connection_hd_cport_clear(connection);

        gb_connection_hd_cport_disable(connection);

        return ret;
}

int gb_connection_enable(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED)
                goto out_unlock;

        ret = _gb_connection_enable(connection, true);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable);

int gb_connection_enable_tx(struct gb_connection *connection)
{
        int ret = 0;

        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_ENABLED) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
                goto out_unlock;

        ret = _gb_connection_enable(connection, false);
        if (!ret)
                trace_gb_connection_enable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
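
/*
 * gb_connection_enable_tx() leaves the connection in ENABLED_TX, where
 * outgoing requests are allowed but incoming requests are not yet
 * delivered to the handler; a later gb_connection_enable() completes
 * the ENABLED_TX -> ENABLED transition without re-running the host
 * cport and SVC connection setup (see _gb_connection_enable() above).
 */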

void gb_connection_disable_rx(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        spin_lock_irq(&connection->lock);
        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                spin_unlock_irq(&connection->lock);
                goto out_unlock;
        }
        connection->state = GB_CONNECTION_STATE_ENABLED_TX;
        gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        trace_gb_connection_disable(connection);

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);

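/*
 * During a mode switch the SVC connection and host cport must survive
 * gb_connection_disable(); their tear-down is deferred until
 * gb_connection_mode_switch_complete() (see the mode_switch check in
 * gb_connection_disable() below).
 */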
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
        connection->mode_switch = true;
}

void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_clear(connection);

        gb_connection_hd_cport_disable(connection);

        connection->mode_switch = false;
}

void gb_connection_disable(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISCONNECTING;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);

        gb_connection_control_disconnecting(connection);
        gb_connection_cport_shutdown_phase_1(connection);
        gb_connection_hd_cport_quiesce(connection);
        gb_connection_cport_shutdown_phase_2(connection);
        gb_connection_control_disconnected(connection);

        connection->state = GB_CONNECTION_STATE_DISABLED;

        /* control-connection tear down is deferred when mode switching */
        if (!connection->mode_switch) {
                gb_connection_svc_connection_destroy(connection);
                gb_connection_hd_cport_clear(connection);

                gb_connection_hd_cport_disable(connection);
        }

out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);

/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
        mutex_lock(&connection->mutex);

        if (connection->state == GB_CONNECTION_STATE_DISABLED)
                goto out_unlock;

        trace_gb_connection_disable(connection);

        spin_lock_irq(&connection->lock);
        connection->state = GB_CONNECTION_STATE_DISABLED;
        gb_connection_cancel_operations(connection, -ESHUTDOWN);
        spin_unlock_irq(&connection->lock);

        gb_connection_hd_cport_flush(connection);

        gb_connection_svc_connection_destroy(connection);
        gb_connection_hd_cport_clear(connection);

        gb_connection_hd_cport_disable(connection);
out_unlock:
        mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);

/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
        if (!connection)
                return;

        if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
                gb_connection_disable(connection);

        mutex_lock(&gb_connection_mutex);

        spin_lock_irq(&gb_connections_lock);
        list_del(&connection->bundle_links);
        list_del(&connection->hd_links);
        spin_unlock_irq(&gb_connections_lock);

        destroy_workqueue(connection->wq);

        gb_hd_cport_release(connection->hd, connection->hd_cport_id);
        connection->hd_cport_id = CPORT_ID_BAD;

        mutex_unlock(&gb_connection_mutex);

        gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);

void gb_connection_latency_tag_enable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_enable)
                return;

        ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to enable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);

void gb_connection_latency_tag_disable(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        int ret;

        if (!hd->driver->latency_tag_disable)
                return;

        ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to disable latency tag: %d\n",
                        connection->name, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);