/*
 * Greybus connection code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
10 #include <linux/workqueue.h>
13 #include "greybus_trace.h"
16 #define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000
/* Forward declaration: kref release callback used by gb_connection_put(). */
static void gb_connection_kref_release(struct kref *kref);

/* Protects the per-host-device connection list (hd->connections). */
static DEFINE_SPINLOCK(gb_connections_lock);
/* Serialises connection creation against destruction. */
static DEFINE_MUTEX(gb_connection_mutex);
26 /* Caller holds gb_connection_mutex. */
27 static bool gb_connection_cport_in_use(struct gb_interface
*intf
, u16 cport_id
)
29 struct gb_host_device
*hd
= intf
->hd
;
30 struct gb_connection
*connection
;
32 list_for_each_entry(connection
, &hd
->connections
, hd_links
) {
33 if (connection
->intf
== intf
&&
34 connection
->intf_cport_id
== cport_id
)
41 static void gb_connection_get(struct gb_connection
*connection
)
43 kref_get(&connection
->kref
);
45 trace_gb_connection_get(connection
);
48 static void gb_connection_put(struct gb_connection
*connection
)
50 trace_gb_connection_put(connection
);
52 kref_put(&connection
->kref
, gb_connection_kref_release
);
56 * Returns a reference-counted pointer to the connection if found.
58 static struct gb_connection
*
59 gb_connection_hd_find(struct gb_host_device
*hd
, u16 cport_id
)
61 struct gb_connection
*connection
;
64 spin_lock_irqsave(&gb_connections_lock
, flags
);
65 list_for_each_entry(connection
, &hd
->connections
, hd_links
)
66 if (connection
->hd_cport_id
== cport_id
) {
67 gb_connection_get(connection
);
72 spin_unlock_irqrestore(&gb_connections_lock
, flags
);
78 * Callback from the host driver to let us know that data has been
79 * received on the bundle.
81 void greybus_data_rcvd(struct gb_host_device
*hd
, u16 cport_id
,
82 u8
*data
, size_t length
)
84 struct gb_connection
*connection
;
88 connection
= gb_connection_hd_find(hd
, cport_id
);
91 "nonexistent connection (%zu bytes dropped)\n", length
);
94 gb_connection_recv(connection
, data
, length
);
95 gb_connection_put(connection
);
97 EXPORT_SYMBOL_GPL(greybus_data_rcvd
);
99 static void gb_connection_kref_release(struct kref
*kref
)
101 struct gb_connection
*connection
;
103 connection
= container_of(kref
, struct gb_connection
, kref
);
105 trace_gb_connection_release(connection
);
110 static void gb_connection_init_name(struct gb_connection
*connection
)
112 u16 hd_cport_id
= connection
->hd_cport_id
;
116 if (connection
->intf
) {
117 intf_id
= connection
->intf
->interface_id
;
118 cport_id
= connection
->intf_cport_id
;
121 snprintf(connection
->name
, sizeof(connection
->name
),
122 "%u/%u:%u", hd_cport_id
, intf_id
, cport_id
);
126 * _gb_connection_create() - create a Greybus connection
127 * @hd: host device of the connection
128 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
129 * @intf: remote interface, or NULL for static connections
130 * @bundle: remote-interface bundle (may be NULL)
131 * @cport_id: remote-interface cport id, or 0 for static connections
132 * @handler: request handler (may be NULL)
133 * @flags: connection flags
135 * Create a Greybus connection, representing the bidirectional link
136 * between a CPort on a (local) Greybus host device and a CPort on
137 * another Greybus interface.
139 * A connection also maintains the state of operations sent over the
142 * Serialised against concurrent create and destroy using the
143 * gb_connection_mutex.
145 * Return: A pointer to the new connection if successful, or an ERR_PTR
148 static struct gb_connection
*
149 _gb_connection_create(struct gb_host_device
*hd
, int hd_cport_id
,
150 struct gb_interface
*intf
,
151 struct gb_bundle
*bundle
, int cport_id
,
152 gb_request_handler_t handler
,
155 struct gb_connection
*connection
;
158 mutex_lock(&gb_connection_mutex
);
160 if (intf
&& gb_connection_cport_in_use(intf
, cport_id
)) {
161 dev_err(&intf
->dev
, "cport %u already in use\n", cport_id
);
166 ret
= gb_hd_cport_allocate(hd
, hd_cport_id
, flags
);
168 dev_err(&hd
->dev
, "failed to allocate cport: %d\n", ret
);
173 connection
= kzalloc(sizeof(*connection
), GFP_KERNEL
);
176 goto err_hd_cport_release
;
179 connection
->hd_cport_id
= hd_cport_id
;
180 connection
->intf_cport_id
= cport_id
;
182 connection
->intf
= intf
;
183 connection
->bundle
= bundle
;
184 connection
->handler
= handler
;
185 connection
->flags
= flags
;
186 if (intf
&& (intf
->quirks
& GB_INTERFACE_QUIRK_NO_CPORT_FEATURES
))
187 connection
->flags
|= GB_CONNECTION_FLAG_NO_FLOWCTRL
;
188 connection
->state
= GB_CONNECTION_STATE_DISABLED
;
190 atomic_set(&connection
->op_cycle
, 0);
191 mutex_init(&connection
->mutex
);
192 spin_lock_init(&connection
->lock
);
193 INIT_LIST_HEAD(&connection
->operations
);
195 connection
->wq
= alloc_workqueue("%s:%d", WQ_UNBOUND
, 1,
196 dev_name(&hd
->dev
), hd_cport_id
);
197 if (!connection
->wq
) {
199 goto err_free_connection
;
202 kref_init(&connection
->kref
);
204 gb_connection_init_name(connection
);
206 spin_lock_irq(&gb_connections_lock
);
207 list_add(&connection
->hd_links
, &hd
->connections
);
210 list_add(&connection
->bundle_links
, &bundle
->connections
);
212 INIT_LIST_HEAD(&connection
->bundle_links
);
214 spin_unlock_irq(&gb_connections_lock
);
216 mutex_unlock(&gb_connection_mutex
);
218 trace_gb_connection_create(connection
);
224 err_hd_cport_release
:
225 gb_hd_cport_release(hd
, hd_cport_id
);
227 mutex_unlock(&gb_connection_mutex
);
232 struct gb_connection
*
233 gb_connection_create_static(struct gb_host_device
*hd
, u16 hd_cport_id
,
234 gb_request_handler_t handler
)
236 return _gb_connection_create(hd
, hd_cport_id
, NULL
, NULL
, 0, handler
,
237 GB_CONNECTION_FLAG_HIGH_PRIO
);
240 struct gb_connection
*
241 gb_connection_create_control(struct gb_interface
*intf
)
243 return _gb_connection_create(intf
->hd
, -1, intf
, NULL
, 0, NULL
,
244 GB_CONNECTION_FLAG_CONTROL
|
245 GB_CONNECTION_FLAG_HIGH_PRIO
);
248 struct gb_connection
*
249 gb_connection_create(struct gb_bundle
*bundle
, u16 cport_id
,
250 gb_request_handler_t handler
)
252 struct gb_interface
*intf
= bundle
->intf
;
254 return _gb_connection_create(intf
->hd
, -1, intf
, bundle
, cport_id
,
257 EXPORT_SYMBOL_GPL(gb_connection_create
);
259 struct gb_connection
*
260 gb_connection_create_flags(struct gb_bundle
*bundle
, u16 cport_id
,
261 gb_request_handler_t handler
,
264 struct gb_interface
*intf
= bundle
->intf
;
266 if (WARN_ON_ONCE(flags
& GB_CONNECTION_FLAG_CORE_MASK
))
267 flags
&= ~GB_CONNECTION_FLAG_CORE_MASK
;
269 return _gb_connection_create(intf
->hd
, -1, intf
, bundle
, cport_id
,
272 EXPORT_SYMBOL_GPL(gb_connection_create_flags
);
274 struct gb_connection
*
275 gb_connection_create_offloaded(struct gb_bundle
*bundle
, u16 cport_id
,
278 flags
|= GB_CONNECTION_FLAG_OFFLOADED
;
280 return gb_connection_create_flags(bundle
, cport_id
, NULL
, flags
);
282 EXPORT_SYMBOL_GPL(gb_connection_create_offloaded
);
284 static int gb_connection_hd_cport_enable(struct gb_connection
*connection
)
286 struct gb_host_device
*hd
= connection
->hd
;
289 if (!hd
->driver
->cport_enable
)
292 ret
= hd
->driver
->cport_enable(hd
, connection
->hd_cport_id
,
295 dev_err(&hd
->dev
, "%s: failed to enable host cport: %d\n",
296 connection
->name
, ret
);
303 static void gb_connection_hd_cport_disable(struct gb_connection
*connection
)
305 struct gb_host_device
*hd
= connection
->hd
;
308 if (!hd
->driver
->cport_disable
)
311 ret
= hd
->driver
->cport_disable(hd
, connection
->hd_cport_id
);
313 dev_err(&hd
->dev
, "%s: failed to disable host cport: %d\n",
314 connection
->name
, ret
);
318 static int gb_connection_hd_cport_connected(struct gb_connection
*connection
)
320 struct gb_host_device
*hd
= connection
->hd
;
323 if (!hd
->driver
->cport_connected
)
326 ret
= hd
->driver
->cport_connected(hd
, connection
->hd_cport_id
);
328 dev_err(&hd
->dev
, "%s: failed to set connected state: %d\n",
329 connection
->name
, ret
);
336 static int gb_connection_hd_cport_flush(struct gb_connection
*connection
)
338 struct gb_host_device
*hd
= connection
->hd
;
341 if (!hd
->driver
->cport_flush
)
344 ret
= hd
->driver
->cport_flush(hd
, connection
->hd_cport_id
);
346 dev_err(&hd
->dev
, "%s: failed to flush host cport: %d\n",
347 connection
->name
, ret
);
354 static int gb_connection_hd_cport_quiesce(struct gb_connection
*connection
)
356 struct gb_host_device
*hd
= connection
->hd
;
360 if (!hd
->driver
->cport_quiesce
)
363 peer_space
= sizeof(struct gb_operation_msg_hdr
) +
364 sizeof(struct gb_cport_shutdown_request
);
366 if (connection
->mode_switch
)
367 peer_space
+= sizeof(struct gb_operation_msg_hdr
);
369 ret
= hd
->driver
->cport_quiesce(hd
, connection
->hd_cport_id
,
371 GB_CONNECTION_CPORT_QUIESCE_TIMEOUT
);
373 dev_err(&hd
->dev
, "%s: failed to quiesce host cport: %d\n",
374 connection
->name
, ret
);
381 static int gb_connection_hd_cport_clear(struct gb_connection
*connection
)
383 struct gb_host_device
*hd
= connection
->hd
;
386 if (!hd
->driver
->cport_clear
)
389 ret
= hd
->driver
->cport_clear(hd
, connection
->hd_cport_id
);
391 dev_err(&hd
->dev
, "%s: failed to clear host cport: %d\n",
392 connection
->name
, ret
);
400 * Request the SVC to create a connection from AP's cport to interface's
404 gb_connection_svc_connection_create(struct gb_connection
*connection
)
406 struct gb_host_device
*hd
= connection
->hd
;
407 struct gb_interface
*intf
;
411 if (gb_connection_is_static(connection
))
414 intf
= connection
->intf
;
417 * Enable either E2EFC or CSD, unless no flow control is requested.
419 cport_flags
= GB_SVC_CPORT_FLAG_CSV_N
;
420 if (gb_connection_flow_control_disabled(connection
)) {
421 cport_flags
|= GB_SVC_CPORT_FLAG_CSD_N
;
422 } else if (gb_connection_e2efc_enabled(connection
)) {
423 cport_flags
|= GB_SVC_CPORT_FLAG_CSD_N
|
424 GB_SVC_CPORT_FLAG_E2EFC
;
427 ret
= gb_svc_connection_create(hd
->svc
,
429 connection
->hd_cport_id
,
431 connection
->intf_cport_id
,
434 dev_err(&connection
->hd
->dev
,
435 "%s: failed to create svc connection: %d\n",
436 connection
->name
, ret
);
444 gb_connection_svc_connection_destroy(struct gb_connection
*connection
)
446 if (gb_connection_is_static(connection
))
449 gb_svc_connection_destroy(connection
->hd
->svc
,
450 connection
->hd
->svc
->ap_intf_id
,
451 connection
->hd_cport_id
,
452 connection
->intf
->interface_id
,
453 connection
->intf_cport_id
);
456 /* Inform Interface about active CPorts */
457 static int gb_connection_control_connected(struct gb_connection
*connection
)
459 struct gb_control
*control
;
460 u16 cport_id
= connection
->intf_cport_id
;
463 if (gb_connection_is_static(connection
))
466 if (gb_connection_is_control(connection
))
469 control
= connection
->intf
->control
;
471 ret
= gb_control_connected_operation(control
, cport_id
);
473 dev_err(&connection
->bundle
->dev
,
474 "failed to connect cport: %d\n", ret
);
482 gb_connection_control_disconnecting(struct gb_connection
*connection
)
484 struct gb_control
*control
;
485 u16 cport_id
= connection
->intf_cport_id
;
488 if (gb_connection_is_static(connection
))
491 control
= connection
->intf
->control
;
493 ret
= gb_control_disconnecting_operation(control
, cport_id
);
495 dev_err(&connection
->hd
->dev
,
496 "%s: failed to send disconnecting: %d\n",
497 connection
->name
, ret
);
502 gb_connection_control_disconnected(struct gb_connection
*connection
)
504 struct gb_control
*control
;
505 u16 cport_id
= connection
->intf_cport_id
;
508 if (gb_connection_is_static(connection
))
511 control
= connection
->intf
->control
;
513 if (gb_connection_is_control(connection
)) {
514 if (connection
->mode_switch
) {
515 ret
= gb_control_mode_switch_operation(control
);
518 * Allow mode switch to time out waiting for
528 ret
= gb_control_disconnected_operation(control
, cport_id
);
530 dev_warn(&connection
->bundle
->dev
,
531 "failed to disconnect cport: %d\n", ret
);
535 static int gb_connection_shutdown_operation(struct gb_connection
*connection
,
538 struct gb_cport_shutdown_request
*req
;
539 struct gb_operation
*operation
;
542 operation
= gb_operation_create_core(connection
,
543 GB_REQUEST_TYPE_CPORT_SHUTDOWN
,
549 req
= operation
->request
->payload
;
552 ret
= gb_operation_request_send_sync(operation
);
554 gb_operation_put(operation
);
559 static int gb_connection_cport_shutdown(struct gb_connection
*connection
,
562 struct gb_host_device
*hd
= connection
->hd
;
563 const struct gb_hd_driver
*drv
= hd
->driver
;
566 if (gb_connection_is_static(connection
))
569 if (gb_connection_is_offloaded(connection
)) {
570 if (!drv
->cport_shutdown
)
573 ret
= drv
->cport_shutdown(hd
, connection
->hd_cport_id
, phase
,
574 GB_OPERATION_TIMEOUT_DEFAULT
);
576 ret
= gb_connection_shutdown_operation(connection
, phase
);
580 dev_err(&hd
->dev
, "%s: failed to send cport shutdown (phase %d): %d\n",
581 connection
->name
, phase
, ret
);
/* Convenience wrappers for the two shutdown phases. */
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}

static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
601 * Cancel all active operations on a connection.
603 * Locking: Called with connection lock held and state set to DISABLED or
606 static void gb_connection_cancel_operations(struct gb_connection
*connection
,
608 __must_hold(&connection
->lock
)
610 struct gb_operation
*operation
;
612 while (!list_empty(&connection
->operations
)) {
613 operation
= list_last_entry(&connection
->operations
,
614 struct gb_operation
, links
);
615 gb_operation_get(operation
);
616 spin_unlock_irq(&connection
->lock
);
618 if (gb_operation_is_incoming(operation
))
619 gb_operation_cancel_incoming(operation
, errno
);
621 gb_operation_cancel(operation
, errno
);
623 gb_operation_put(operation
);
625 spin_lock_irq(&connection
->lock
);
630 * Cancel all active incoming operations on a connection.
632 * Locking: Called with connection lock held and state set to ENABLED_TX.
635 gb_connection_flush_incoming_operations(struct gb_connection
*connection
,
637 __must_hold(&connection
->lock
)
639 struct gb_operation
*operation
;
642 while (!list_empty(&connection
->operations
)) {
644 list_for_each_entry(operation
, &connection
->operations
,
646 if (gb_operation_is_incoming(operation
)) {
647 gb_operation_get(operation
);
656 spin_unlock_irq(&connection
->lock
);
658 /* FIXME: flush, not cancel? */
659 gb_operation_cancel_incoming(operation
, errno
);
660 gb_operation_put(operation
);
662 spin_lock_irq(&connection
->lock
);
667 * _gb_connection_enable() - enable a connection
668 * @connection: connection to enable
669 * @rx: whether to enable incoming requests
671 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
672 * ENABLED_TX->ENABLED state transitions.
674 * Locking: Caller holds connection->mutex.
676 static int _gb_connection_enable(struct gb_connection
*connection
, bool rx
)
680 /* Handle ENABLED_TX -> ENABLED transitions. */
681 if (connection
->state
== GB_CONNECTION_STATE_ENABLED_TX
) {
682 if (!(connection
->handler
&& rx
))
685 spin_lock_irq(&connection
->lock
);
686 connection
->state
= GB_CONNECTION_STATE_ENABLED
;
687 spin_unlock_irq(&connection
->lock
);
692 ret
= gb_connection_hd_cport_enable(connection
);
696 ret
= gb_connection_svc_connection_create(connection
);
698 goto err_hd_cport_clear
;
700 ret
= gb_connection_hd_cport_connected(connection
);
702 goto err_svc_connection_destroy
;
704 spin_lock_irq(&connection
->lock
);
705 if (connection
->handler
&& rx
)
706 connection
->state
= GB_CONNECTION_STATE_ENABLED
;
708 connection
->state
= GB_CONNECTION_STATE_ENABLED_TX
;
709 spin_unlock_irq(&connection
->lock
);
711 ret
= gb_connection_control_connected(connection
);
713 goto err_control_disconnecting
;
717 err_control_disconnecting
:
718 spin_lock_irq(&connection
->lock
);
719 connection
->state
= GB_CONNECTION_STATE_DISCONNECTING
;
720 gb_connection_cancel_operations(connection
, -ESHUTDOWN
);
721 spin_unlock_irq(&connection
->lock
);
723 /* Transmit queue should already be empty. */
724 gb_connection_hd_cport_flush(connection
);
726 gb_connection_control_disconnecting(connection
);
727 gb_connection_cport_shutdown_phase_1(connection
);
728 gb_connection_hd_cport_quiesce(connection
);
729 gb_connection_cport_shutdown_phase_2(connection
);
730 gb_connection_control_disconnected(connection
);
731 connection
->state
= GB_CONNECTION_STATE_DISABLED
;
732 err_svc_connection_destroy
:
733 gb_connection_svc_connection_destroy(connection
);
735 gb_connection_hd_cport_clear(connection
);
737 gb_connection_hd_cport_disable(connection
);
742 int gb_connection_enable(struct gb_connection
*connection
)
746 mutex_lock(&connection
->mutex
);
748 if (connection
->state
== GB_CONNECTION_STATE_ENABLED
)
751 ret
= _gb_connection_enable(connection
, true);
753 trace_gb_connection_enable(connection
);
756 mutex_unlock(&connection
->mutex
);
760 EXPORT_SYMBOL_GPL(gb_connection_enable
);
762 int gb_connection_enable_tx(struct gb_connection
*connection
)
766 mutex_lock(&connection
->mutex
);
768 if (connection
->state
== GB_CONNECTION_STATE_ENABLED
) {
773 if (connection
->state
== GB_CONNECTION_STATE_ENABLED_TX
)
776 ret
= _gb_connection_enable(connection
, false);
778 trace_gb_connection_enable(connection
);
781 mutex_unlock(&connection
->mutex
);
785 EXPORT_SYMBOL_GPL(gb_connection_enable_tx
);
787 void gb_connection_disable_rx(struct gb_connection
*connection
)
789 mutex_lock(&connection
->mutex
);
791 spin_lock_irq(&connection
->lock
);
792 if (connection
->state
!= GB_CONNECTION_STATE_ENABLED
) {
793 spin_unlock_irq(&connection
->lock
);
796 connection
->state
= GB_CONNECTION_STATE_ENABLED_TX
;
797 gb_connection_flush_incoming_operations(connection
, -ESHUTDOWN
);
798 spin_unlock_irq(&connection
->lock
);
800 trace_gb_connection_disable(connection
);
803 mutex_unlock(&connection
->mutex
);
805 EXPORT_SYMBOL_GPL(gb_connection_disable_rx
);
807 void gb_connection_mode_switch_prepare(struct gb_connection
*connection
)
809 connection
->mode_switch
= true;
812 void gb_connection_mode_switch_complete(struct gb_connection
*connection
)
814 gb_connection_svc_connection_destroy(connection
);
815 gb_connection_hd_cport_clear(connection
);
817 gb_connection_hd_cport_disable(connection
);
819 connection
->mode_switch
= false;
822 void gb_connection_disable(struct gb_connection
*connection
)
824 mutex_lock(&connection
->mutex
);
826 if (connection
->state
== GB_CONNECTION_STATE_DISABLED
)
829 trace_gb_connection_disable(connection
);
831 spin_lock_irq(&connection
->lock
);
832 connection
->state
= GB_CONNECTION_STATE_DISCONNECTING
;
833 gb_connection_cancel_operations(connection
, -ESHUTDOWN
);
834 spin_unlock_irq(&connection
->lock
);
836 gb_connection_hd_cport_flush(connection
);
838 gb_connection_control_disconnecting(connection
);
839 gb_connection_cport_shutdown_phase_1(connection
);
840 gb_connection_hd_cport_quiesce(connection
);
841 gb_connection_cport_shutdown_phase_2(connection
);
842 gb_connection_control_disconnected(connection
);
844 connection
->state
= GB_CONNECTION_STATE_DISABLED
;
846 /* control-connection tear down is deferred when mode switching */
847 if (!connection
->mode_switch
) {
848 gb_connection_svc_connection_destroy(connection
);
849 gb_connection_hd_cport_clear(connection
);
851 gb_connection_hd_cport_disable(connection
);
855 mutex_unlock(&connection
->mutex
);
857 EXPORT_SYMBOL_GPL(gb_connection_disable
);
859 /* Disable a connection without communicating with the remote end. */
860 void gb_connection_disable_forced(struct gb_connection
*connection
)
862 mutex_lock(&connection
->mutex
);
864 if (connection
->state
== GB_CONNECTION_STATE_DISABLED
)
867 trace_gb_connection_disable(connection
);
869 spin_lock_irq(&connection
->lock
);
870 connection
->state
= GB_CONNECTION_STATE_DISABLED
;
871 gb_connection_cancel_operations(connection
, -ESHUTDOWN
);
872 spin_unlock_irq(&connection
->lock
);
874 gb_connection_hd_cport_flush(connection
);
876 gb_connection_svc_connection_destroy(connection
);
877 gb_connection_hd_cport_clear(connection
);
879 gb_connection_hd_cport_disable(connection
);
881 mutex_unlock(&connection
->mutex
);
883 EXPORT_SYMBOL_GPL(gb_connection_disable_forced
);
885 /* Caller must have disabled the connection before destroying it. */
886 void gb_connection_destroy(struct gb_connection
*connection
)
891 if (WARN_ON(connection
->state
!= GB_CONNECTION_STATE_DISABLED
))
892 gb_connection_disable(connection
);
894 mutex_lock(&gb_connection_mutex
);
896 spin_lock_irq(&gb_connections_lock
);
897 list_del(&connection
->bundle_links
);
898 list_del(&connection
->hd_links
);
899 spin_unlock_irq(&gb_connections_lock
);
901 destroy_workqueue(connection
->wq
);
903 gb_hd_cport_release(connection
->hd
, connection
->hd_cport_id
);
904 connection
->hd_cport_id
= CPORT_ID_BAD
;
906 mutex_unlock(&gb_connection_mutex
);
908 gb_connection_put(connection
);
910 EXPORT_SYMBOL_GPL(gb_connection_destroy
);
912 void gb_connection_latency_tag_enable(struct gb_connection
*connection
)
914 struct gb_host_device
*hd
= connection
->hd
;
917 if (!hd
->driver
->latency_tag_enable
)
920 ret
= hd
->driver
->latency_tag_enable(hd
, connection
->hd_cport_id
);
922 dev_err(&connection
->hd
->dev
,
923 "%s: failed to enable latency tag: %d\n",
924 connection
->name
, ret
);
927 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable
);
929 void gb_connection_latency_tag_disable(struct gb_connection
*connection
)
931 struct gb_host_device
*hd
= connection
->hd
;
934 if (!hd
->driver
->latency_tag_disable
)
937 ret
= hd
->driver
->latency_tag_disable(hd
, connection
->hd_cport_id
);
939 dev_err(&connection
->hd
->dev
,
940 "%s: failed to disable latency tag: %d\n",
941 connection
->name
, ret
);
944 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable
);