// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
				      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);
	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}
	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight.  It should be the first result value
		 * set after the initial -EBADR.  Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation.  Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value.  Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ; /* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result; /* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);
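
/*
 * Illustrative lifecycle of operation->errno for an outgoing operation,
 * summarizing the rules enforced above (informational comment only):
 *
 *	gb_operation_create_*():	errno = -EBADR (initial, "never set")
 *	gb_operation_request_send():	errno = -EINPROGRESS (request in flight)
 *	response/cancel/timeout:	errno = final result; the first value
 *					set "sticks" and later attempts are
 *					ignored
 *
 * gb_operation_result() is therefore only meaningful once the operation
 * has completed; the WARN_ONs above catch callers that read it too early.
 */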

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
						    connection->hd_cport_id,
						    message,
						    gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler.  The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;
	int ret;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation)) {
		gb_operation_request_handle(operation);
	} else {
		ret = del_timer_sync(&operation->timer);
		if (!ret) {
			/* Cancel request message if scheduled by timeout. */
			if (gb_operation_result(operation) == -ETIMEDOUT)
				gb_message_cancel(operation->request);
		}

		operation->callback(operation);
	}

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_timeout(unsigned long arg)
{
	struct gb_operation *operation = (void *)arg;

	if (gb_operation_result_set(operation, -ETIMEDOUT)) {
		/*
		 * A stuck request message will be cancelled from the
		 * workqueue.
		 */
		queue_work(gb_operation_completion_wq, &operation->work);
	}
}

static void gb_operation_message_init(struct gb_host_device *hd,
				      struct gb_message *message,
				      u16 operation_id,
				      size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID.  Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent.  For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero.  It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
			   size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
			 message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message.  Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
				 size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated.  The errno will be set before sending.  All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
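
/*
 * Sketch of how a protocol's incoming-request handler might use
 * gb_operation_response_alloc() to return a payload.  This is
 * illustrative only; the gb_example_* names are hypothetical:
 *
 *	static int gb_example_request_handler(struct gb_operation *op)
 *	{
 *		struct gb_example_response *response;
 *
 *		if (!gb_operation_response_alloc(op, sizeof(*response),
 *						 GFP_KERNEL))
 *			return -ENOMEM;
 *
 *		response = op->response->payload;
 *		response->value = cpu_to_le32(42);
 *
 *		return 0;
 *	}
 *
 * The returned status is passed to gb_operation_response_send() by
 * gb_operation_request_handle(), which sends the response on the
 * handler's behalf.
 */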

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
			   size_t request_size, size_t response_size,
			   unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}

		setup_timer(&operation->timer, gb_operation_timeout,
			    (unsigned long)operation);
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
			  u8 type, size_t request_size,
			  size_t response_size, unsigned long flags,
			  gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
			 u8 type, size_t request_size,
			 size_t response_size, unsigned long flags,
			 gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
					       request_size, response_size,
					       flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}
/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
			     u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
					       request_size,
					       GB_REQUEST_TYPE_INVALID,
					       flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @timeout: operation timeout in milliseconds, or zero for no timeout
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete.  The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      unsigned int timeout,
			      gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation.  It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	if (timeout) {
		operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
		add_timer(&operation->timer);
	}

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
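
/*
 * Illustrative asynchronous usage of gb_operation_request_send(); the
 * gb_example_* names, process(), and the 1000 ms timeout are hypothetical:
 *
 *	static void gb_example_callback(struct gb_operation *op)
 *	{
 *		if (!gb_operation_result(op))
 *			process(op->response->payload);
 *		gb_operation_put(op);	(drop the creator's reference)
 *	}
 *
 *	op = gb_operation_create(connection, GB_EXAMPLE_TYPE_GET,
 *				 sizeof(*request), sizeof(*response),
 *				 GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *	ret = gb_operation_request_send(op, gb_example_callback, 1000,
 *					GFP_KERNEL);
 *	if (ret)
 *		gb_operation_put(op);	(send failed; the callback will
 *					 never run)
 */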

/*
 * Send a synchronous operation.  This function is expected to
 * block, returning only when the response has arrived (or an
 * error is detected).  The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
					   unsigned int timeout)
{
	int ret;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					timeout, GFP_KERNEL);
	if (ret)
		return ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will allocate
 * the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
				      int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
			  struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation.  If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives.  If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
				   &operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				       const struct gb_operation_msg_hdr *header,
				       void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
					const struct gb_operation_msg_hdr *header,
					void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: invalid response id 0 received\n",
				    connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: unexpected response id 0x%04x received\n",
				    connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				    "%s: malformed response 0x%02x received (%zu > %zu)\n",
				    connection->name, header->type,
				    size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					    "%s: short response 0x%02x received (%zu < %zu)\n",
					    connection->name, header->type,
					    size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
			void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
	    gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				     connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				    connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				    "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				    connection->name,
				    le16_to_cpu(header.operation_id),
				    header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
					    msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
					   msg_size);
	}
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously.  Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
		   !gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
			      void *request, int request_size,
			      void *response, int response_size,
			      unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
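
/*
 * Illustrative call of the synchronous helper above; the gb_example_*
 * names and the 1000 ms timeout are hypothetical:
 *
 *	struct gb_example_request request = { .id = cpu_to_le32(1) };
 *	struct gb_example_response response;
 *	int ret;
 *
 *	ret = gb_operation_sync_timeout(connection, GB_EXAMPLE_TYPE_GET,
 *					&request, sizeof(request),
 *					&response, sizeof(response), 1000);
 *	if (ret)
 *		return ret;	(response was left untouched)
 */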

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection: connection to use
 * @type: type of operation to send
 * @request: memory buffer to copy the request from
 * @request_size: size of @request
 * @timeout: send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
					int type, void *request,
					int request_size,
					unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
					      request_size, 0,
					      GB_OPERATION_FLAG_UNIDIRECTIONAL,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
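
/*
 * Illustrative fire-and-forget send using the helper above; the
 * gb_example_* names and the 1000 ms timeout are hypothetical.  A zero
 * return only means the host device accepted the message, not that the
 * remote end processed it:
 *
 *	ret = gb_operation_unidirectional_timeout(connection,
 *						  GB_EXAMPLE_TYPE_EVENT,
 *						  &event, sizeof(event),
 *						  1000);
 */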

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
					     sizeof(struct gb_message), 0, 0,
					     NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
					       sizeof(struct gb_operation), 0,
					       0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
						     0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}