// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ;	/* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);

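/*
 * Illustrative sketch (not part of the driver): the "sticky" result
 * semantics above mean that when two paths race to complete an operation
 * (say, response arrival versus cancellation), exactly one of them sees
 * gb_operation_result_set() return true and is therefore responsible for
 * scheduling completion:
 *
 *	// Completion path:
 *	if (gb_operation_result_set(operation, errno))
 *		queue_work(gb_operation_completion_wq, &operation->work);
 *
 *	// Racing cancellation path:
 *	if (gb_operation_result_set(operation, -ECANCELED))
 *		queue_work(gb_operation_completion_wq, &operation->work);
 *
 * Whichever call runs first wins; the loser gets false back and must not
 * queue the work again.
 */
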
/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
						    connection->hd_cport_id,
						    message,
						    gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler.  The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = del_timer_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(unsigned long arg)
{
	struct gb_operation *operation = (void *)arg;

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}

static void gb_operation_message_init(struct gb_host_device *hd,
				      struct gb_message *message,
				      u16 operation_id,
				      size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID.  Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

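/*
 * For reference, the wire-format header initialized above is defined in
 * greybus_protocols.h; the layout shown here is for illustration only:
 *
 *	struct gb_operation_msg_hdr {
 *		__le16	size;		// Header plus payload, in bytes
 *		__le16	operation_id;	// Operation unique id; 0 is reserved
 *		__u8	type;		// Protocol-specific request type
 *		__u8	result;		// Result (in response messages only)
 *		__u8	pad[2];		// Must be zero (ignored on receive)
 *	} __packed;
 */
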
/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);

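/*
 * Illustrative sketch (not part of the driver): an incoming request
 * handler with a response payload would typically use the function above
 * as follows.  The my_* names are hypothetical.
 *
 *	static int my_request_handler(struct gb_operation *operation)
 *	{
 *		struct my_response *response;
 *
 *		if (!gb_operation_response_alloc(operation, sizeof(*response),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		response = operation->response->payload;
 *		response->value = cpu_to_le32(my_read_value());
 *
 *		return 0;	// Core code sends the response message
 *	}
 */
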
/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		setup_timer(&operation->timer, gb_operation_timeout,
			    (unsigned long)operation);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;	/* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
			  u8 type, size_t request_size,
			  size_t response_size, unsigned long flags,
			  gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
			 u8 type, size_t request_size,
			 size_t response_size, unsigned long flags,
			 gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}

/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
					       request_size,
					       GB_REQUEST_TYPE_INVALID,
					       flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

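/*
 * Reference-count lifecycle, for illustration: an operation is created
 * holding a single reference for its creator.  Sending a request (or a
 * response) takes an extra reference that core code drops once the
 * operation completes, so the creator's reference stays valid for the
 * duration.  The creator drops its own reference with gb_operation_put()
 * when it is done with the operation (see gb_operation_sync_timeout()
 * below for the typical pattern).
 */
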
/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @timeout: operation timeout in milliseconds, or zero for no timeout
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      unsigned int timeout,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);

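/*
 * Illustrative sketch (not part of the driver): a typical asynchronous
 * round trip using gb_operation_request_send().  The my_* names and
 * MY_TIMEOUT_MS are hypothetical.
 *
 *	static void my_callback(struct gb_operation *operation)
 *	{
 *		int ret = gb_operation_result(operation);
 *
 *		if (ret)
 *			;	// Operation failed, timed out or was cancelled
 *		gb_operation_put(operation);	// Drop the creator's reference
 *	}
 *
 *	operation = gb_operation_create(connection, MY_TYPE,
 *					sizeof(*request), sizeof(*response),
 *					GFP_KERNEL);
 *	if (!operation)
 *		return -ENOMEM;
 *	memcpy(operation->request->payload, request, sizeof(*request));
 *
 *	ret = gb_operation_request_send(operation, my_callback,
 *					MY_TIMEOUT_MS, GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(operation);	// Never queued; put it now
 */
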
/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived, or when an
 * error is detected.  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it can
 * simply supply the result errno; this function will allocate the
 * response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives.  If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       const struct gb_operation_msg_hdr *header,
				       void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
					const struct gb_operation_msg_hdr *header,
					void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: invalid response id 0 received\n",
				    connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: unexpected response id 0x%04x received\n",
				    connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
				    connection->name, header->type,
				    size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					    "%s: short response 0x%02x received (%zu < %zu)\n",
					    connection->name, header->type,
					    size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
			void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
	    gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				     connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				    connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				    connection->name,
				    le16_to_cpu(header.operation_id),
				    header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
					    msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
					   msg_size);
	}
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously.  Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response.
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
			      void *request, int request_size,
			      void *response, int response_size,
			      unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);

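/*
 * Illustrative sketch (not part of the driver): a protocol driver making
 * a simple synchronous call.  struct my_request/my_response and MY_TYPE
 * are hypothetical.
 *
 *	struct my_request request = { .param = cpu_to_le32(param) };
 *	struct my_response response;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, MY_TYPE,
 *					&request, sizeof(request),
 *					&response, sizeof(response),
 *					GB_OPERATION_TIMEOUT_DEFAULT);
 *	if (ret)
 *		return ret;	// response was left untouched
 *
 *	value = le32_to_cpu(response.value);
 */
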
/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection: connection to use
 * @type: type of operation to send
 * @request: memory buffer to copy the request from
 * @request_size: size of @request
 * @timeout: send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
					int type, void *request,
					int request_size,
					unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					      request_size, 0,
					      GB_OPERATION_FLAG_UNIDIRECTIONAL,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);

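/*
 * Illustrative sketch (not part of the driver): sending a notification
 * that expects no response.  struct my_event and MY_EVENT_TYPE are
 * hypothetical.
 *
 *	struct my_event event = { .id = cpu_to_le16(id) };
 *	int ret;
 *
 *	ret = gb_operation_unidirectional_timeout(connection, MY_EVENT_TYPE,
 *						  &event, sizeof(event),
 *						  GB_OPERATION_TIMEOUT_DEFAULT);
 *	if (ret)
 *		dev_warn(dev, "event not sent: %d\n", ret);
 */
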
int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
					     sizeof(struct gb_message), 0, 0,
					     NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
					       sizeof(struct gb_operation),
					       0, 0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
						     0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}