/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
                                      int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        unsigned long flags;

        spin_lock_irqsave(&connection->lock, flags);
        switch (connection->state) {
        case GB_CONNECTION_STATE_ENABLED:
                break;
        case GB_CONNECTION_STATE_ENABLED_TX:
                if (gb_operation_is_incoming(operation))
                        goto err_unlock;
                break;
        case GB_CONNECTION_STATE_DISCONNECTING:
                if (!gb_operation_is_core(operation))
                        goto err_unlock;
                break;
        default:
                goto err_unlock;
        }

        if (operation->active++ == 0)
                list_add_tail(&operation->links, &connection->operations);

        trace_gb_operation_get_active(operation);

        spin_unlock_irqrestore(&connection->lock, flags);

        return 0;

err_unlock:
        spin_unlock_irqrestore(&connection->lock, flags);

        return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        unsigned long flags;

        spin_lock_irqsave(&connection->lock, flags);

        trace_gb_operation_put_active(operation);

        if (--operation->active == 0) {
                list_del(&operation->links);
                if (atomic_read(&operation->waiters))
                        wake_up(&gb_operation_cancellation_queue);
        }
        spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&connection->lock, flags);
        ret = operation->active;
        spin_unlock_irqrestore(&connection->lock, flags);

        return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks."  That is, if two concurrent threads
 * race to set the result, the first one wins.  The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong.  It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead.  Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
        unsigned long flags;
        int prev;

        if (result == -EINPROGRESS) {
                /*
                 * -EINPROGRESS is used to indicate the request is
                 * in flight.  It should be the first result value
                 * set after the initial -EBADR.  Issue a warning
                 * and record an implementation error if it's
                 * set at any other time.
                 */
                spin_lock_irqsave(&gb_operations_lock, flags);
                prev = operation->errno;
                if (prev == -EBADR)
                        operation->errno = result;
                else
                        operation->errno = -EILSEQ;
                spin_unlock_irqrestore(&gb_operations_lock, flags);
                WARN_ON(prev != -EBADR);

                return true;
        }

        /*
         * The first result value set after a request has been sent
         * will be the final result of the operation.  Subsequent
         * attempts to set the result are ignored.
         *
         * Note that -EBADR is a reserved "initial state" result
         * value.  Attempts to set this value result in a warning,
         * and the result code is set to -EILSEQ instead.
         */
        if (WARN_ON(result == -EBADR))
                result = -EILSEQ;       /* Nobody should be setting -EBADR */

        spin_lock_irqsave(&gb_operations_lock, flags);
        prev = operation->errno;
        if (prev == -EINPROGRESS)
                operation->errno = result;      /* First and final result */
        spin_unlock_irqrestore(&gb_operations_lock, flags);

        return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
        int result = operation->errno;

        WARN_ON(result == -EBADR);
        WARN_ON(result == -EINPROGRESS);

        return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);

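/*
 * To illustrate the result lifecycle documented above (a sketch, not
 * driver code; all calls are the ones defined in this file or its
 * header):
 *
 *      op = gb_operation_create(...);          // op->errno == -EBADR
 *      gb_operation_request_send(op, ...);     // op->errno == -EINPROGRESS
 *      // response arrives, or sending fails:
 *      gb_operation_result_set(op, 0);         // first setter wins
 *      gb_operation_result_set(op, -EIO);      // ignored, returns false
 *      gb_operation_result(op);                // 0
 */
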
/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
        struct gb_operation *operation;
        unsigned long flags;
        bool found = false;

        spin_lock_irqsave(&connection->lock, flags);
        list_for_each_entry(operation, &connection->operations, links)
                if (operation->id == operation_id &&
                                !gb_operation_is_incoming(operation)) {
                        gb_operation_get(operation);
                        found = true;
                        break;
                }
        spin_unlock_irqrestore(&connection->lock, flags);

        return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
        struct gb_connection *connection = message->operation->connection;

        trace_gb_message_send(message);
        return connection->hd->driver->message_send(connection->hd,
                                        connection->hd_cport_id,
                                        message,
                                        gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
        struct gb_host_device *hd = message->operation->connection->hd;

        hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        int status;
        int ret;

        if (connection->handler) {
                status = connection->handler(operation);
        } else {
                dev_err(&connection->hd->dev,
                        "%s: unexpected incoming request of type 0x%02x\n",
                        connection->name, operation->type);

                status = -EPROTONOSUPPORT;
        }

        ret = gb_operation_response_send(operation, status);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: failed to send response %d for type 0x%02x: %d\n",
                        connection->name, status, operation->type, ret);
                return;
        }
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler.  The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this.  The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
        struct gb_operation *operation;
        int ret;

        operation = container_of(work, struct gb_operation, work);

        if (gb_operation_is_incoming(operation)) {
                gb_operation_request_handle(operation);
        } else {
                ret = del_timer_sync(&operation->timer);
                if (!ret) {
                        /* Cancel request message if scheduled by timeout. */
                        if (gb_operation_result(operation) == -ETIMEDOUT)
                                gb_message_cancel(operation->request);
                }

                operation->callback(operation);
        }

        gb_operation_put_active(operation);
        gb_operation_put(operation);
}

static void gb_operation_timeout(unsigned long arg)
{
        struct gb_operation *operation = (void *)arg;

        if (gb_operation_result_set(operation, -ETIMEDOUT)) {
                /*
                 * A stuck request message will be cancelled from the
                 * workqueue.
                 */
                queue_work(gb_operation_completion_wq, &operation->work);
        }
}

static void gb_operation_message_init(struct gb_host_device *hd,
                                struct gb_message *message, u16 operation_id,
                                size_t payload_size, u8 type)
{
        struct gb_operation_msg_hdr *header;

        header = message->buffer;

        message->header = header;
        message->payload = payload_size ? header + 1 : NULL;
        message->payload_size = payload_size;

        /*
         * The type supplied for incoming message buffers will be
         * GB_REQUEST_TYPE_INVALID.  Such buffers will be overwritten by
         * arriving data so there's no need to initialize the message header.
         */
        if (type != GB_REQUEST_TYPE_INVALID) {
                u16 message_size = (u16)(sizeof(*header) + payload_size);

                /*
                 * For a request, the operation id gets filled in
                 * when the message is sent.  For a response, it
                 * will be copied from the request by the caller.
                 *
                 * The result field in a request message must be
                 * zero.  It will be set just prior to sending for
                 * a response.
                 */
                header->size = cpu_to_le16(message_size);
                header->operation_id = 0;
                header->type = type;
                header->result = 0;
        }
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header.  The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation.  The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *      message header  \_ these combined are
 *      message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
                                size_t payload_size, gfp_t gfp_flags)
{
        struct gb_message *message;
        struct gb_operation_msg_hdr *header;
        size_t message_size = payload_size + sizeof(*header);

        if (message_size > hd->buffer_size_max) {
                dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
                                message_size, hd->buffer_size_max);
                return NULL;
        }

        /* Allocate the message structure and buffer. */
        message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
        if (!message)
                return NULL;

        message->buffer = kzalloc(message_size, gfp_flags);
        if (!message->buffer)
                goto err_free_message;

        /* Initialize the message.  Operation id is filled in later. */
        gb_operation_message_init(hd, message, 0, payload_size, type);

        return message;

err_free_message:
        kmem_cache_free(gb_message_cache, message);

        return NULL;
}

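/*
 * A worked example of the layout above: a message with an 8-byte
 * payload occupies sizeof(struct gb_operation_msg_hdr) + 8 bytes.
 * That combined size is what gets checked against hd->buffer_size_max
 * and stored in header->size, and the payload (when present) starts
 * immediately after the header ("header + 1" in
 * gb_operation_message_init() above).
 */
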
static void gb_operation_message_free(struct gb_message *message)
{
        kfree(message->buffer);
        kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
        switch (status) {
        case GB_OP_SUCCESS:
                return 0;
        case GB_OP_INTERRUPTED:
                return -EINTR;
        case GB_OP_TIMEOUT:
                return -ETIMEDOUT;
        case GB_OP_NO_MEMORY:
                return -ENOMEM;
        case GB_OP_PROTOCOL_BAD:
                return -EPROTONOSUPPORT;
        case GB_OP_OVERFLOW:
                return -EMSGSIZE;
        case GB_OP_INVALID:
                return -EINVAL;
        case GB_OP_RETRY:
                return -EAGAIN;
        case GB_OP_NONEXISTENT:
                return -ENODEV;
        case GB_OP_MALFUNCTION:
                return -EILSEQ;
        case GB_OP_UNKNOWN_ERROR:
        default:
                return -EIO;
        }
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire.  Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
        switch (errno) {
        case 0:
                return GB_OP_SUCCESS;
        case -EINTR:
                return GB_OP_INTERRUPTED;
        case -ETIMEDOUT:
                return GB_OP_TIMEOUT;
        case -ENOMEM:
                return GB_OP_NO_MEMORY;
        case -EPROTONOSUPPORT:
                return GB_OP_PROTOCOL_BAD;
        case -EMSGSIZE:
                return GB_OP_OVERFLOW;  /* Could be underflow too */
        case -EINVAL:
                return GB_OP_INVALID;
        case -EAGAIN:
                return GB_OP_RETRY;
        case -EILSEQ:
                return GB_OP_MALFUNCTION;
        case -ENODEV:
                return GB_OP_NONEXISTENT;
        case -EIO:
        default:
                return GB_OP_UNKNOWN_ERROR;
        }
}

bool gb_operation_response_alloc(struct gb_operation *operation,
                                        size_t response_size, gfp_t gfp)
{
        struct gb_host_device *hd = operation->connection->hd;
        struct gb_operation_msg_hdr *request_header;
        struct gb_message *response;
        u8 type;

        type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
        response = gb_operation_message_alloc(hd, type, response_size, gfp);
        if (!response)
                return false;
        response->operation = operation;

        /*
         * Size and type get initialized when the message is
         * allocated.  The errno will be set before sending.  All
         * that's left is the operation id, which we copy from the
         * request message header (as-is, in little-endian order).
         */
        request_header = operation->request->header;
        response->header->operation_id = request_header->operation_id;
        operation->response = response;

        return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);

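/*
 * Typical use: an incoming-request handler allocates its response
 * before filling in the payload.  A minimal sketch, assuming a
 * hypothetical gb_example_response structure:
 *
 *      static int gb_example_request_handler(struct gb_operation *op)
 *      {
 *              struct gb_example_response *response;
 *
 *              if (!gb_operation_response_alloc(op, sizeof(*response),
 *                                               GFP_KERNEL))
 *                      return -ENOMEM;
 *
 *              response = op->response->payload;
 *              response->status = 0;   // fill in the protocol payload
 *
 *              return 0;       // core sends the response; see
 *                              // gb_operation_request_handle()
 *      }
 */
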
/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data.  The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC.  In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header.  Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol.  So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
                                size_t request_size, size_t response_size,
                                unsigned long op_flags, gfp_t gfp_flags)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_operation *operation;

        operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
        if (!operation)
                return NULL;
        operation->connection = connection;

        operation->request = gb_operation_message_alloc(hd, type, request_size,
                                                        gfp_flags);
        if (!operation->request)
                goto err_cache;
        operation->request->operation = operation;

        /* Allocate the response buffer for outgoing operations */
        if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
                if (!gb_operation_response_alloc(operation, response_size,
                                                 gfp_flags)) {
                        goto err_request;
                }

                setup_timer(&operation->timer, gb_operation_timeout,
                            (unsigned long)operation);
        }

        operation->flags = op_flags;
        operation->type = type;
        operation->errno = -EBADR;      /* Initial value--means "never set" */

        INIT_WORK(&operation->work, gb_operation_work);
        init_completion(&operation->completion);
        kref_init(&operation->kref);
        atomic_set(&operation->waiters, 0);

        return operation;

err_request:
        gb_operation_message_free(operation->request);
err_cache:
        kmem_cache_free(gb_operation_cache, operation);

        return NULL;
}

/*
 * Create a new operation associated with the given connection.  The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only.  Both of
 * these are allowed to be 0.  Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
                                u8 type, size_t request_size,
                                size_t response_size, unsigned long flags,
                                gfp_t gfp)
{
        struct gb_operation *operation;

        if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
                return NULL;
        if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
                type &= ~GB_MESSAGE_TYPE_RESPONSE;

        if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
                flags &= GB_OPERATION_FLAG_USER_MASK;

        operation = gb_operation_create_common(connection, type,
                                                request_size, response_size,
                                                flags, gfp);
        if (operation)
                trace_gb_operation_create(operation);

        return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

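/*
 * A minimal outgoing-operation sketch (the GB_EXAMPLE_* type and the
 * request structure are hypothetical; gb_operation_create() is the
 * header wrapper used elsewhere in this file):
 *
 *      operation = gb_operation_create(connection, GB_EXAMPLE_TYPE_SET,
 *                                      sizeof(*request), 0, GFP_KERNEL);
 *      if (!operation)
 *              return -ENOMEM;
 *
 *      request = operation->request->payload;
 *      request->mode = cpu_to_le32(mode);
 *
 *      ret = gb_operation_request_send_sync_timeout(operation, 1000);
 *      gb_operation_put(operation);
 */
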
struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
                                u8 type, size_t request_size,
                                size_t response_size, unsigned long flags,
                                gfp_t gfp)
{
        struct gb_operation *operation;

        flags |= GB_OPERATION_FLAG_CORE;

        operation = gb_operation_create_common(connection, type,
                                                request_size, response_size,
                                                flags, gfp);
        if (operation)
                trace_gb_operation_create_core(operation);

        return operation;
}
/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;

        return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

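/*
 * Protocol drivers can use this to size variable-length transfers,
 * e.g. clamping a data transfer to what fits in a single message
 * (sketch):
 *
 *      size_t max = gb_operation_get_payload_size_max(connection);
 *
 *      if (len > max)
 *              len = max;
 */
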
static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
                                u8 type, void *data, size_t size)
{
        struct gb_operation *operation;
        size_t request_size;
        unsigned long flags = GB_OPERATION_FLAG_INCOMING;

        /* Caller has made sure we at least have a message header. */
        request_size = size - sizeof(struct gb_operation_msg_hdr);

        if (!id)
                flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

        operation = gb_operation_create_common(connection, type,
                                                request_size,
                                                GB_REQUEST_TYPE_INVALID,
                                                flags, GFP_ATOMIC);
        if (!operation)
                return NULL;

        operation->id = id;
        memcpy(operation->request->header, data, size);
        trace_gb_operation_create_incoming(operation);

        return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
        kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
        struct gb_operation *operation;

        operation = container_of(kref, struct gb_operation, kref);

        trace_gb_operation_destroy(operation);

        if (operation->response)
                gb_operation_message_free(operation->response);
        gb_operation_message_free(operation->request);

        kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
        if (WARN_ON(!operation))
                return;

        kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
        complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @timeout: operation timeout in milliseconds, or zero for no timeout
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete.  The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
                                gb_operation_callback callback,
                                unsigned int timeout,
                                gfp_t gfp)
{
        struct gb_connection *connection = operation->connection;
        struct gb_operation_msg_hdr *header;
        unsigned int cycle;
        int ret;

        if (gb_connection_is_offloaded(connection))
                return -EBUSY;

        if (!callback)
                return -EINVAL;

        /*
         * Record the callback function, which is executed in
         * non-atomic (workqueue) context when the final result
         * of an operation has been set.
         */
        operation->callback = callback;

        /*
         * Assign the operation's id, and store it in the request header.
         * Zero is a reserved operation id for unidirectional operations.
         */
        if (gb_operation_is_unidirectional(operation)) {
                operation->id = 0;
        } else {
                cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
                operation->id = (u16)(cycle % U16_MAX + 1);
        }

        header = operation->request->header;
        header->operation_id = cpu_to_le16(operation->id);

        gb_operation_result_set(operation, -EINPROGRESS);

        /*
         * Get an extra reference on the operation.  It'll be dropped when the
         * operation completes.
         */
        gb_operation_get(operation);
        ret = gb_operation_get_active(operation);
        if (ret)
                goto err_put;

        ret = gb_message_send(operation->request, gfp);
        if (ret)
                goto err_put_active;

        if (timeout) {
                operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
                add_timer(&operation->timer);
        }

        return 0;

err_put_active:
        gb_operation_put_active(operation);
err_put:
        gb_operation_put(operation);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);

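/*
 * An asynchronous-send sketch (the callback name is hypothetical).
 * The callback runs in workqueue context once the result is final,
 * and is a natural place to drop the creation reference:
 *
 *      static void gb_example_callback(struct gb_operation *operation)
 *      {
 *              int ret = gb_operation_result(operation);
 *
 *              // on success, consume operation->response->payload
 *              gb_operation_put(operation);
 *      }
 *
 *      ret = gb_operation_request_send(operation, gb_example_callback,
 *                                      0, GFP_KERNEL);
 */
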
/*
 * Send a synchronous operation.  This function is expected to block,
 * returning only when the response has arrived or an error is
 * detected.  The return value is the result of the operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
                                                unsigned int timeout)
{
        int ret;

        ret = gb_operation_request_send(operation, gb_operation_sync_callback,
                                        timeout, GFP_KERNEL);
        if (ret)
                return ret;

        ret = wait_for_completion_interruptible(&operation->completion);
        if (ret < 0) {
                /* Cancel the operation if interrupted */
                gb_operation_cancel(operation, -ECANCELED);
        }

        return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

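/*
 * Note that the value returned above is the operation result, which
 * already folds in transport errors, a timeout (-ETIMEDOUT from the
 * operation timer) or interruption (-ECANCELED via the cancel above,
 * unless a result had already been recorded).  Caller sketch, with a
 * hypothetical dev pointer:
 *
 *      ret = gb_operation_request_send_sync_timeout(operation, 500);
 *      if (ret == -ETIMEDOUT)
 *              dev_warn(dev, "no response within 500 ms\n");
 *      else if (ret)
 *              dev_err(dev, "operation failed: %d\n", ret);
 */
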
/*
 * Send a response for an incoming operation request.  A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message.  Otherwise it
 * can simply supply the result errno; this function will
 * allocate the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
                                        int errno)
{
        struct gb_connection *connection = operation->connection;
        int ret;

        if (!operation->response &&
                        !gb_operation_is_unidirectional(operation)) {
                if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
                        return -ENOMEM;
        }

        /* Record the result */
        if (!gb_operation_result_set(operation, errno)) {
                dev_err(&connection->hd->dev, "request result already set\n");
                return -EIO;    /* Shouldn't happen */
        }

        /* Sender of request does not care about response. */
        if (gb_operation_is_unidirectional(operation))
                return 0;

        /* Reference will be dropped when message has been sent. */
        gb_operation_get(operation);
        ret = gb_operation_get_active(operation);
        if (ret)
                goto err_put;

        /* Fill in the response header and send it */
        operation->response->header->result = gb_operation_errno_map(errno);

        ret = gb_message_send(operation->response, GFP_KERNEL);
        if (ret)
                goto err_put_active;

        return 0;

err_put_active:
        gb_operation_put_active(operation);
err_put:
        gb_operation_put(operation);

        return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
                                struct gb_message *message, int status)
{
        struct gb_operation *operation = message->operation;
        struct gb_connection *connection = operation->connection;

        /*
         * If the message was a response, we just need to drop our
         * reference to the operation.  If an error occurred, report
         * it.
         *
         * For requests, if there's no error and the operation is not
         * unidirectional, there's nothing more to do until the response
         * arrives.  If an error occurred attempting to send it, or if the
         * operation is unidirectional, record the result of the operation and
         * schedule its completion.
         */
        if (message == operation->response) {
                if (status) {
                        dev_err(&connection->hd->dev,
                                "%s: error sending response 0x%02x: %d\n",
                                connection->name, operation->type, status);
                }

                gb_operation_put_active(operation);
                gb_operation_put(operation);
        } else if (status || gb_operation_is_unidirectional(operation)) {
                if (gb_operation_result_set(operation, status)) {
                        queue_work(gb_operation_completion_wq,
                                        &operation->work);
                }
        }
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

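/*
 * Host-device drivers call greybus_message_sent() from their transmit
 * completion path.  An illustrative sketch only (the URB-based shape
 * mirrors a USB host driver; the names, and example_hd as the
 * driver's struct gb_host_device, are hypothetical):
 *
 *      static void gb_example_send_complete(struct urb *urb)
 *      {
 *              struct gb_message *message = urb->context;
 *
 *              greybus_message_sent(example_hd, message, urb->status);
 *      }
 */
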
/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
                                const struct gb_operation_msg_hdr *header,
                                void *data, size_t size)
{
        struct gb_operation *operation;
        u16 operation_id;
        u8 type;
        int ret;

        operation_id = le16_to_cpu(header->operation_id);
        type = header->type;

        operation = gb_operation_create_incoming(connection, operation_id,
                                                type, data, size);
        if (!operation) {
                dev_err(&connection->hd->dev,
                        "%s: can't create incoming operation\n",
                        connection->name);
                return;
        }

        ret = gb_operation_get_active(operation);
        if (ret) {
                gb_operation_put(operation);
                return;
        }
        trace_gb_message_recv_request(operation->request);

        /*
         * The initial reference to the operation will be dropped when the
         * request handler returns.
         */
        if (gb_operation_result_set(operation, -EINPROGRESS))
                queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
                                const struct gb_operation_msg_hdr *header,
                                void *data, size_t size)
{
        struct gb_operation *operation;
        struct gb_message *message;
        size_t message_size;
        u16 operation_id;
        int errno;

        operation_id = le16_to_cpu(header->operation_id);

        if (!operation_id) {
                dev_err_ratelimited(&connection->hd->dev,
                                "%s: invalid response id 0 received\n",
                                connection->name);
                return;
        }

        operation = gb_operation_find_outgoing(connection, operation_id);
        if (!operation) {
                dev_err_ratelimited(&connection->hd->dev,
                                "%s: unexpected response id 0x%04x received\n",
                                connection->name, operation_id);
                return;
        }

        errno = gb_operation_status_map(header->result);
        message = operation->response;
        message_size = sizeof(*header) + message->payload_size;
        if (!errno && size > message_size) {
                dev_err_ratelimited(&connection->hd->dev,
                                "%s: malformed response 0x%02x received (%zu > %zu)\n",
                                connection->name, header->type,
                                size, message_size);
                errno = -EMSGSIZE;
        } else if (!errno && size < message_size) {
                if (gb_operation_short_response_allowed(operation)) {
                        message->payload_size = size - sizeof(*header);
                } else {
                        dev_err_ratelimited(&connection->hd->dev,
                                        "%s: short response 0x%02x received (%zu < %zu)\n",
                                        connection->name, header->type,
                                        size, message_size);
                        errno = -EMSGSIZE;
                }
        }

        /* We must ignore the payload if a bad status is returned */
        if (errno)
                size = sizeof(*header);

        /* The rest will be handled in work queue context */
        if (gb_operation_result_set(operation, errno)) {
                memcpy(message->buffer, data, size);

                trace_gb_message_recv_response(message);

                queue_work(gb_operation_completion_wq, &operation->work);
        }

        gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
                                void *data, size_t size)
{
        struct gb_operation_msg_hdr header;
        struct device *dev = &connection->hd->dev;
        size_t msg_size;

        if (connection->state == GB_CONNECTION_STATE_DISABLED ||
                        gb_connection_is_offloaded(connection)) {
                dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
                                connection->name, size);
                return;
        }

        if (size < sizeof(header)) {
                dev_err_ratelimited(dev, "%s: short message received\n",
                                connection->name);
                return;
        }

        /* Use memcpy as data may be unaligned */
        memcpy(&header, data, sizeof(header));
        msg_size = le16_to_cpu(header.size);
        if (size < msg_size) {
                dev_err_ratelimited(dev,
                                "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
                                connection->name,
                                le16_to_cpu(header.operation_id),
                                header.type, size, msg_size);
                return;         /* XXX Should still complete operation */
        }

        if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
                gb_connection_recv_response(connection, &header, data,
                                                msg_size);
        } else {
                gb_connection_recv_request(connection, &header, data,
                                                msg_size);
        }
}

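/*
 * Host-device drivers do not normally call this directly: their
 * receive handlers hand raw cport data to greybus_data_rcvd() (in
 * connection.c), which resolves the cport id to a connection and then
 * calls gb_connection_recv().  Driver-side sketch:
 *
 *      greybus_data_rcvd(hd, cport_id, data, length);
 */
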
/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
        if (WARN_ON(gb_operation_is_incoming(operation)))
                return;

        if (gb_operation_result_set(operation, errno)) {
                gb_message_cancel(operation->request);
                queue_work(gb_operation_completion_wq, &operation->work);
        }
        trace_gb_message_cancel_outgoing(operation->request);

        atomic_inc(&operation->waiters);
        wait_event(gb_operation_cancellation_queue,
                        !gb_operation_is_active(operation));
        atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

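/*
 * Usage sketch: besides the interrupted synchronous wait in
 * gb_operation_request_send_sync_timeout() above, connection teardown
 * flushes outstanding outgoing operations this way, e.g.:
 *
 *      gb_operation_cancel(operation, -ESHUTDOWN);
 */
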
/*
 * Cancel an incoming operation synchronously.  Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
        if (WARN_ON(!gb_operation_is_incoming(operation)))
                return;

        if (!gb_operation_is_unidirectional(operation)) {
                /*
                 * Make sure the request handler has submitted the response
                 * before cancelling it.
                 */
                flush_work(&operation->work);
                if (!gb_operation_result_set(operation, errno))
                        gb_message_cancel(operation->response);
        }
        trace_gb_message_cancel_incoming(operation->response);

        atomic_inc(&operation->waiters);
        wait_event(gb_operation_cancellation_queue,
                        !gb_operation_is_active(operation));
        atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation.  It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs.  @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
                                void *request, int request_size,
                                void *response, int response_size,
                                unsigned int timeout)
{
        struct gb_operation *operation;
        int ret;

        if ((response_size && !response) ||
            (request_size && !request))
                return -EINVAL;

        operation = gb_operation_create(connection, type,
                                        request_size, response_size,
                                        GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        if (request_size)
                memcpy(operation->request->payload, request, request_size);

        ret = gb_operation_request_send_sync_timeout(operation, timeout);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
                        connection->name, operation->id, type, ret);
        } else {
                if (response_size) {
                        memcpy(response, operation->response->payload,
                               response_size);
                }
        }

        gb_operation_put(operation);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);

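/*
 * A typical protocol-driver call, with hypothetical request/response
 * structures and operation type (payload fields are little-endian on
 * the wire):
 *
 *      struct gb_example_request request;
 *      struct gb_example_response response;
 *      int ret;
 *
 *      request.mode = cpu_to_le32(mode);
 *      ret = gb_operation_sync_timeout(connection, GB_EXAMPLE_TYPE_GET,
 *                                      &request, sizeof(request),
 *                                      &response, sizeof(response),
 *                                      1000);
 *      if (ret)
 *              return ret;
 *      value = le32_to_cpu(response.value);
 */
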
/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection: connection to use
 * @type: type of operation to send
 * @request: memory buffer to copy the request from
 * @request_size: size of @request
 * @timeout: send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
                                int type, void *request, int request_size,
                                unsigned int timeout)
{
        struct gb_operation *operation;
        int ret;

        if (request_size && !request)
                return -EINVAL;

        operation = gb_operation_create_flags(connection, type,
                                        request_size, 0,
                                        GB_OPERATION_FLAG_UNIDIRECTIONAL,
                                        GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        if (request_size)
                memcpy(operation->request->payload, request, request_size);

        ret = gb_operation_request_send_sync_timeout(operation, timeout);
        if (ret) {
                dev_err(&connection->hd->dev,
                        "%s: unidirectional operation of type 0x%02x failed: %d\n",
                        connection->name, type, ret);
        }

        gb_operation_put(operation);

        return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);

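/*
 * Fire-and-forget notifications use this; the request goes out with
 * operation id 0 and no response is ever expected.  Sketch with a
 * hypothetical event type:
 *
 *      ret = gb_operation_unidirectional_timeout(connection,
 *                                      GB_EXAMPLE_TYPE_EVENT,
 *                                      &event, sizeof(event), 1000);
 */
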
int __init gb_operation_init(void)
{
        gb_message_cache = kmem_cache_create("gb_message_cache",
                                sizeof(struct gb_message), 0, 0, NULL);
        if (!gb_message_cache)
                return -ENOMEM;

        gb_operation_cache = kmem_cache_create("gb_operation_cache",
                                sizeof(struct gb_operation), 0, 0, NULL);
        if (!gb_operation_cache)
                goto err_destroy_message_cache;

        gb_operation_completion_wq = alloc_workqueue("greybus_completion",
                                0, 0);
        if (!gb_operation_completion_wq)
                goto err_destroy_operation_cache;

        return 0;

err_destroy_operation_cache:
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
err_destroy_message_cache:
        kmem_cache_destroy(gb_message_cache);
        gb_message_cache = NULL;

        return -ENOMEM;
}

void gb_operation_exit(void)
{
        destroy_workqueue(gb_operation_completion_wq);
        gb_operation_completion_wq = NULL;
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
        kmem_cache_destroy(gb_message_cache);
        gb_message_cache = NULL;
}