drivers/misc/mei/client.c
1 /*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17 #include <linux/sched/signal.h>
18 #include <linux/wait.h>
19 #include <linux/delay.h>
20 #include <linux/slab.h>
21 #include <linux/pm_runtime.h>
22
23 #include <linux/mei.h>
24
25 #include "mei_dev.h"
26 #include "hbm.h"
27 #include "client.h"
28
29 /**
30 * mei_me_cl_init - initialize me client
31 *
32 * @me_cl: me client
33 */
34 void mei_me_cl_init(struct mei_me_client *me_cl)
35 {
36 INIT_LIST_HEAD(&me_cl->list);
37 kref_init(&me_cl->refcnt);
38 }
39
40 /**
41 * mei_me_cl_get - increases me client refcount
42 *
43 * @me_cl: me client
44 *
45 * Locking: called under "dev->device_lock" lock
46 *
47 * Return: me client or NULL
48 */
49 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
50 {
51 if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
52 return me_cl;
53
54 return NULL;
55 }
56
57 /**
58 * mei_me_cl_release - free me client
59 *
60 * Locking: called under "dev->device_lock" lock
61 *
62 * @ref: me_client refcount
63 */
64 static void mei_me_cl_release(struct kref *ref)
65 {
66 struct mei_me_client *me_cl =
67 container_of(ref, struct mei_me_client, refcnt);
68
69 kfree(me_cl);
70 }
71
72 /**
73 * mei_me_cl_put - decrease me client refcount and free client if necessary
74 *
75 * Locking: called under "dev->device_lock" lock
76 *
77 * @me_cl: me client
78 */
79 void mei_me_cl_put(struct mei_me_client *me_cl)
80 {
81 if (me_cl)
82 kref_put(&me_cl->refcnt, mei_me_cl_release);
83 }
84
85 /**
86 * __mei_me_cl_del - delete me client from the list and decrease
87 * reference counter
88 *
89 * @dev: mei device
90 * @me_cl: me client
91 *
92 * Locking: dev->me_clients_rwsem
93 */
94 static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
95 {
96 if (!me_cl)
97 return;
98
99 list_del_init(&me_cl->list);
100 mei_me_cl_put(me_cl);
101 }
102
103 /**
104 * mei_me_cl_del - delete me client from the list and decrease
105 * reference counter
106 *
107 * @dev: mei device
108 * @me_cl: me client
109 */
110 void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
111 {
112 down_write(&dev->me_clients_rwsem);
113 __mei_me_cl_del(dev, me_cl);
114 up_write(&dev->me_clients_rwsem);
115 }
116
117 /**
118 * mei_me_cl_add - add me client to the list
119 *
120 * @dev: mei device
121 * @me_cl: me client
122 */
123 void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
124 {
125 down_write(&dev->me_clients_rwsem);
126 list_add(&me_cl->list, &dev->me_clients);
127 up_write(&dev->me_clients_rwsem);
128 }
129
130 /**
131 * __mei_me_cl_by_uuid - locate me client by uuid
132 * increases ref count
133 *
134 * @dev: mei device
135 * @uuid: me client uuid
136 *
137 * Return: me client or NULL if not found
138 *
139 * Locking: dev->me_clients_rwsem
140 */
141 static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
142 const uuid_le *uuid)
143 {
144 struct mei_me_client *me_cl;
145 const uuid_le *pn;
146
147 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
148
149 list_for_each_entry(me_cl, &dev->me_clients, list) {
150 pn = &me_cl->props.protocol_name;
151 if (uuid_le_cmp(*uuid, *pn) == 0)
152 return mei_me_cl_get(me_cl);
153 }
154
155 return NULL;
156 }
157
158 /**
159 * mei_me_cl_by_uuid - locate me client by uuid
160 * increases ref count
161 *
162 * @dev: mei device
163 * @uuid: me client uuid
164 *
165 * Return: me client or NULL if not found
166 *
167 * Locking: dev->me_clients_rwsem
168 */
169 struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
170 const uuid_le *uuid)
171 {
172 struct mei_me_client *me_cl;
173
174 down_read(&dev->me_clients_rwsem);
175 me_cl = __mei_me_cl_by_uuid(dev, uuid);
176 up_read(&dev->me_clients_rwsem);
177
178 return me_cl;
179 }
180
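/*
 * Illustrative sketch, not part of the driver: each successful lookup
 * above returns a referenced me client, so it must be paired with
 * mei_me_cl_put(). "my_uuid" here is a hypothetical caller-supplied uuid.
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &my_uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	... use me_cl->props ...
 *	mei_me_cl_put(me_cl);
 */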
181 /**
182 * mei_me_cl_by_id - locate me client by client id
183 * increases ref count
184 *
185 * @dev: the device structure
186 * @client_id: me client id
187 *
188 * Return: me client or NULL if not found
189 *
190 * Locking: dev->me_clients_rwsem
191 */
192 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
193 {
194
195 struct mei_me_client *__me_cl, *me_cl = NULL;
196
197 down_read(&dev->me_clients_rwsem);
198 list_for_each_entry(__me_cl, &dev->me_clients, list) {
199 if (__me_cl->client_id == client_id) {
200 me_cl = mei_me_cl_get(__me_cl);
201 break;
202 }
203 }
204 up_read(&dev->me_clients_rwsem);
205
206 return me_cl;
207 }
208
209 /**
210 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
211 * increases ref count
212 *
213 * @dev: the device structure
214 * @uuid: me client uuid
215 * @client_id: me client id
216 *
217  * Return: me client or NULL if not found
218 *
219 * Locking: dev->me_clients_rwsem
220 */
221 static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
222 const uuid_le *uuid, u8 client_id)
223 {
224 struct mei_me_client *me_cl;
225 const uuid_le *pn;
226
227 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
228
229 list_for_each_entry(me_cl, &dev->me_clients, list) {
230 pn = &me_cl->props.protocol_name;
231 if (uuid_le_cmp(*uuid, *pn) == 0 &&
232 me_cl->client_id == client_id)
233 return mei_me_cl_get(me_cl);
234 }
235
236 return NULL;
237 }
238
239
240 /**
241 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
242 * increases ref count
243 *
244 * @dev: the device structure
245 * @uuid: me client uuid
246 * @client_id: me client id
247 *
248  * Return: me client or NULL if not found
249 */
250 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
251 const uuid_le *uuid, u8 client_id)
252 {
253 struct mei_me_client *me_cl;
254
255 down_read(&dev->me_clients_rwsem);
256 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
257 up_read(&dev->me_clients_rwsem);
258
259 return me_cl;
260 }
261
262 /**
263 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
264 *
265 * @dev: the device structure
266 * @uuid: me client uuid
267 *
268 * Locking: called under "dev->device_lock" lock
269 */
270 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
271 {
272 struct mei_me_client *me_cl;
273
274 dev_dbg(dev->dev, "remove %pUl\n", uuid);
275
276 down_write(&dev->me_clients_rwsem);
277 me_cl = __mei_me_cl_by_uuid(dev, uuid);
278 __mei_me_cl_del(dev, me_cl);
279 up_write(&dev->me_clients_rwsem);
280 }
281
282 /**
283  * mei_me_cl_rm_by_uuid_id - remove all me clients matching uuid and client id
284 *
285 * @dev: the device structure
286 * @uuid: me client uuid
287 * @id: me client id
288 *
289 * Locking: called under "dev->device_lock" lock
290 */
291 void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
292 {
293 struct mei_me_client *me_cl;
294
295 dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
296
297 down_write(&dev->me_clients_rwsem);
298 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
299 __mei_me_cl_del(dev, me_cl);
300 up_write(&dev->me_clients_rwsem);
301 }
302
303 /**
304 * mei_me_cl_rm_all - remove all me clients
305 *
306 * @dev: the device structure
307 *
308 * Locking: called under "dev->device_lock" lock
309 */
310 void mei_me_cl_rm_all(struct mei_device *dev)
311 {
312 struct mei_me_client *me_cl, *next;
313
314 down_write(&dev->me_clients_rwsem);
315 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
316 __mei_me_cl_del(dev, me_cl);
317 up_write(&dev->me_clients_rwsem);
318 }
319
320 /**
321 * mei_io_cb_free - free mei_cb_private related memory
322 *
323 * @cb: mei callback struct
324 */
325 void mei_io_cb_free(struct mei_cl_cb *cb)
326 {
327 if (cb == NULL)
328 return;
329
330 list_del(&cb->list);
331 kfree(cb->buf.data);
332 kfree(cb);
333 }
334
335 /**
336  * mei_tx_cb_enqueue - queue tx callback
337 *
338 * Locking: called under "dev->device_lock" lock
339 *
340 * @cb: mei callback struct
341 * @head: an instance of list to queue on
342 */
343 static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
344 struct list_head *head)
345 {
346 list_add_tail(&cb->list, head);
347 cb->cl->tx_cb_queued++;
348 }
349
350 /**
351 * mei_tx_cb_dequeue - dequeue tx callback
352 *
353 * Locking: called under "dev->device_lock" lock
354 *
355 * @cb: mei callback struct to dequeue and free
356 */
357 static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
358 {
359 if (!WARN_ON(cb->cl->tx_cb_queued == 0))
360 cb->cl->tx_cb_queued--;
361
362 mei_io_cb_free(cb);
363 }
364
365 /**
366 * mei_io_cb_init - allocate and initialize io callback
367 *
368 * @cl: mei client
369 * @type: operation type
370 * @fp: pointer to file structure
371 *
372 * Return: mei_cl_cb pointer or NULL;
373 */
374 static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
375 enum mei_cb_file_ops type,
376 const struct file *fp)
377 {
378 struct mei_cl_cb *cb;
379
380 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
381 if (!cb)
382 return NULL;
383
384 INIT_LIST_HEAD(&cb->list);
385 cb->fp = fp;
386 cb->cl = cl;
387 cb->buf_idx = 0;
388 cb->fop_type = type;
389 return cb;
390 }
391
392 /**
393 * mei_io_list_flush_cl - removes cbs belonging to the cl.
394 *
395 * @head: an instance of our list structure
396 * @cl: host client
397 */
398 static void mei_io_list_flush_cl(struct list_head *head,
399 const struct mei_cl *cl)
400 {
401 struct mei_cl_cb *cb, *next;
402
403 list_for_each_entry_safe(cb, next, head, list) {
404 if (cl == cb->cl)
405 list_del_init(&cb->list);
406 }
407 }
408
409 /**
410  * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
411 *
412 * @head: An instance of our list structure
413 * @cl: host client
414 */
415 static void mei_io_tx_list_free_cl(struct list_head *head,
416 const struct mei_cl *cl)
417 {
418 struct mei_cl_cb *cb, *next;
419
420 list_for_each_entry_safe(cb, next, head, list) {
421 if (cl == cb->cl)
422 mei_tx_cb_dequeue(cb);
423 }
424 }
425
426 /**
427 * mei_io_list_free_fp - free cb from a list that matches file pointer
428 *
429 * @head: io list
430 * @fp: file pointer (matching cb file object), may be NULL
431 */
432 static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
433 {
434 struct mei_cl_cb *cb, *next;
435
436 list_for_each_entry_safe(cb, next, head, list)
437 if (!fp || fp == cb->fp)
438 mei_io_cb_free(cb);
439 }
440
441 /**
442 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
443 *
444 * @cl: host client
445 * @length: size of the buffer
446 * @fop_type: operation type
447 * @fp: associated file pointer (might be NULL)
448 *
449 * Return: cb on success and NULL on failure
450 */
451 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
452 enum mei_cb_file_ops fop_type,
453 const struct file *fp)
454 {
455 struct mei_cl_cb *cb;
456
457 cb = mei_io_cb_init(cl, fop_type, fp);
458 if (!cb)
459 return NULL;
460
461 if (length == 0)
462 return cb;
463
464 cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
465 if (!cb->buf.data) {
466 mei_io_cb_free(cb);
467 return NULL;
468 }
469 cb->buf.size = length;
470
471 return cb;
472 }
473
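/*
 * Illustrative sketch, not part of the driver: allocating a write cb,
 * filling its buffer and handing ownership to mei_cl_write(), which
 * frees the cb on completion or error. Assumes dev->device_lock is held
 * and "payload"/"payload_len" are hypothetical caller data.
 *
 *	struct mei_cl_cb *cb;
 *
 *	cb = mei_cl_alloc_cb(cl, payload_len, MEI_FOP_WRITE, NULL);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, payload, payload_len);
 *	return mei_cl_write(cl, cb);
 */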
474 /**
475 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
476  * and enqueuing a control command cb
477 *
478 * @cl: host client
479 * @length: size of the buffer
480 * @fop_type: operation type
481 * @fp: associated file pointer (might be NULL)
482 *
483 * Return: cb on success and NULL on failure
484 * Locking: called under "dev->device_lock" lock
485 */
486 struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
487 enum mei_cb_file_ops fop_type,
488 const struct file *fp)
489 {
490 struct mei_cl_cb *cb;
491
492 /* for RX always allocate at least client's mtu */
493 if (length)
494 length = max_t(size_t, length, mei_cl_mtu(cl));
495
496 cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
497 if (!cb)
498 return NULL;
499
500 list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
501 return cb;
502 }
503
504 /**
505 * mei_cl_read_cb - find this cl's callback in the read list
506 * for a specific file
507 *
508 * @cl: host client
509 * @fp: file pointer (matching cb file object), may be NULL
510 *
511 * Return: cb on success, NULL if cb is not found
512 */
513 struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
514 {
515 struct mei_cl_cb *cb;
516
517 list_for_each_entry(cb, &cl->rd_completed, list)
518 if (!fp || fp == cb->fp)
519 return cb;
520
521 return NULL;
522 }
523
524 /**
525 * mei_cl_flush_queues - flushes queue lists belonging to cl.
526 *
527 * @cl: host client
528 * @fp: file pointer (matching cb file object), may be NULL
529 *
530 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
531 */
532 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
533 {
534 struct mei_device *dev;
535
536 if (WARN_ON(!cl || !cl->dev))
537 return -EINVAL;
538
539 dev = cl->dev;
540
541 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
542 mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
543 mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
544 mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
545 mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
546 mei_io_list_free_fp(&cl->rd_pending, fp);
547 mei_io_list_free_fp(&cl->rd_completed, fp);
548
549 return 0;
550 }
551
552 /**
553 * mei_cl_init - initializes cl.
554 *
555 * @cl: host client to be initialized
556 * @dev: mei device
557 */
558 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
559 {
560 memset(cl, 0, sizeof(struct mei_cl));
561 init_waitqueue_head(&cl->wait);
562 init_waitqueue_head(&cl->rx_wait);
563 init_waitqueue_head(&cl->tx_wait);
564 init_waitqueue_head(&cl->ev_wait);
565 INIT_LIST_HEAD(&cl->rd_completed);
566 INIT_LIST_HEAD(&cl->rd_pending);
567 INIT_LIST_HEAD(&cl->link);
568 cl->writing_state = MEI_IDLE;
569 cl->state = MEI_FILE_UNINITIALIZED;
570 cl->dev = dev;
571 }
572
573 /**
574 * mei_cl_allocate - allocates cl structure and sets it up.
575 *
576 * @dev: mei device
577  * Return: The allocated host client or NULL on failure
578 */
579 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
580 {
581 struct mei_cl *cl;
582
583 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
584 if (!cl)
585 return NULL;
586
587 mei_cl_init(cl, dev);
588
589 return cl;
590 }
591
592 /**
593 * mei_cl_link - allocate host id in the host map
594 *
595 * @cl: host client
596 *
597 * Return: 0 on success
598 * -EINVAL on incorrect values
599 * -EMFILE if open count exceeded.
600 */
601 int mei_cl_link(struct mei_cl *cl)
602 {
603 struct mei_device *dev;
604 int id;
605
606 if (WARN_ON(!cl || !cl->dev))
607 return -EINVAL;
608
609 dev = cl->dev;
610
611 id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
612 if (id >= MEI_CLIENTS_MAX) {
613 dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
614 return -EMFILE;
615 }
616
617 if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
618 dev_err(dev->dev, "open_handle_count exceeded %d",
619 MEI_MAX_OPEN_HANDLE_COUNT);
620 return -EMFILE;
621 }
622
623 dev->open_handle_count++;
624
625 cl->host_client_id = id;
626 list_add_tail(&cl->link, &dev->file_list);
627
628 set_bit(id, dev->host_clients_map);
629
630 cl->state = MEI_FILE_INITIALIZING;
631
632 cl_dbg(dev, cl, "link cl\n");
633 return 0;
634 }
635
636 /**
637 * mei_cl_unlink - remove host client from the list
638 *
639 * @cl: host client
640 *
641 * Return: always 0
642 */
643 int mei_cl_unlink(struct mei_cl *cl)
644 {
645 struct mei_device *dev;
646
647 /* don't shout on error exit path */
648 if (!cl)
649 return 0;
650
651 if (WARN_ON(!cl->dev))
652 return 0;
653
654 dev = cl->dev;
655
656 cl_dbg(dev, cl, "unlink client");
657
658 if (dev->open_handle_count > 0)
659 dev->open_handle_count--;
660
661 /* never clear the 0 bit */
662 if (cl->host_client_id)
663 clear_bit(cl->host_client_id, dev->host_clients_map);
664
665 list_del_init(&cl->link);
666
667 cl->state = MEI_FILE_UNINITIALIZED;
668 cl->writing_state = MEI_IDLE;
669
670 WARN_ON(!list_empty(&cl->rd_completed) ||
671 !list_empty(&cl->rd_pending) ||
672 !list_empty(&cl->link));
673
674 return 0;
675 }
676
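/**
 * mei_host_client_init - mark the device enabled and schedule
 *	bus rescan and runtime pm autosuspend
 *
 * @dev: the device structure
 */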
677 void mei_host_client_init(struct mei_device *dev)
678 {
679 dev->dev_state = MEI_DEV_ENABLED;
680 dev->reset_count = 0;
681
682 schedule_work(&dev->bus_rescan_work);
683
684 pm_runtime_mark_last_busy(dev->dev);
685 dev_dbg(dev->dev, "rpm: autosuspend\n");
686 pm_request_autosuspend(dev->dev);
687 }
688
689 /**
690 * mei_hbuf_acquire - try to acquire host buffer
691 *
692 * @dev: the device structure
693 * Return: true if host buffer was acquired
694 */
695 bool mei_hbuf_acquire(struct mei_device *dev)
696 {
697 if (mei_pg_state(dev) == MEI_PG_ON ||
698 mei_pg_in_transition(dev)) {
699 dev_dbg(dev->dev, "device is in pg\n");
700 return false;
701 }
702
703 if (!dev->hbuf_is_ready) {
704 dev_dbg(dev->dev, "hbuf is not ready\n");
705 return false;
706 }
707
708 dev->hbuf_is_ready = false;
709
710 return true;
711 }
712
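/*
 * Illustrative sketch, not part of the driver: callers pair a successful
 * mei_hbuf_acquire() with an immediate message write, mirroring the
 * pattern used throughout this file; when acquisition fails the request
 * stays on a control list and is retried from the interrupt thread.
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		rets = mei_cl_send_connect(cl, cb);
 *		if (rets)
 *			goto out;
 *	}
 */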
713 /**
714 * mei_cl_wake_all - wake up readers, writers and event waiters so
715 * they can be interrupted
716 *
717 * @cl: host client
718 */
719 static void mei_cl_wake_all(struct mei_cl *cl)
720 {
721 struct mei_device *dev = cl->dev;
722
723 /* synchronized under device mutex */
724 if (waitqueue_active(&cl->rx_wait)) {
725 cl_dbg(dev, cl, "Waking up reading client!\n");
726 wake_up_interruptible(&cl->rx_wait);
727 }
728 /* synchronized under device mutex */
729 if (waitqueue_active(&cl->tx_wait)) {
730 cl_dbg(dev, cl, "Waking up writing client!\n");
731 wake_up_interruptible(&cl->tx_wait);
732 }
733 /* synchronized under device mutex */
734 if (waitqueue_active(&cl->ev_wait)) {
735 cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
736 wake_up_interruptible(&cl->ev_wait);
737 }
738 /* synchronized under device mutex */
739 if (waitqueue_active(&cl->wait)) {
740 cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
741 wake_up(&cl->wait);
742 }
743 }
744
745 /**
746 * mei_cl_set_disconnected - set disconnected state and clear
747 * associated states and resources
748 *
749 * @cl: host client
750 */
751 static void mei_cl_set_disconnected(struct mei_cl *cl)
752 {
753 struct mei_device *dev = cl->dev;
754
755 if (cl->state == MEI_FILE_DISCONNECTED ||
756 cl->state <= MEI_FILE_INITIALIZING)
757 return;
758
759 cl->state = MEI_FILE_DISCONNECTED;
760 mei_io_tx_list_free_cl(&dev->write_list, cl);
761 mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
762 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
763 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
764 mei_cl_wake_all(cl);
765 cl->rx_flow_ctrl_creds = 0;
766 cl->tx_flow_ctrl_creds = 0;
767 cl->timer_count = 0;
768
769 if (!cl->me_cl)
770 return;
771
772 if (!WARN_ON(cl->me_cl->connect_count == 0))
773 cl->me_cl->connect_count--;
774
775 if (cl->me_cl->connect_count == 0)
776 cl->me_cl->tx_flow_ctrl_creds = 0;
777
778 mei_me_cl_put(cl->me_cl);
779 cl->me_cl = NULL;
780 }
781
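/**
 * mei_cl_set_connecting - take a reference on the me client and bind
 *	the host client to it in the connecting state
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is not available,
 *	-EBUSY if a fixed address client is already connected
 */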
782 static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
783 {
784 if (!mei_me_cl_get(me_cl))
785 return -ENOENT;
786
787 /* only one connection is allowed for fixed address clients */
788 if (me_cl->props.fixed_address) {
789 if (me_cl->connect_count) {
790 mei_me_cl_put(me_cl);
791 return -EBUSY;
792 }
793 }
794
795 cl->me_cl = me_cl;
796 cl->state = MEI_FILE_CONNECTING;
797 cl->me_cl->connect_count++;
798
799 return 0;
800 }
801
802 /**
803 * mei_cl_send_disconnect - send disconnect request
804 *
805 * @cl: host client
806 * @cb: callback block
807 *
808 * Return: 0, OK; otherwise, error.
809 */
810 static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
811 {
812 struct mei_device *dev;
813 int ret;
814
815 dev = cl->dev;
816
817 ret = mei_hbm_cl_disconnect_req(dev, cl);
818 cl->status = ret;
819 if (ret) {
820 cl->state = MEI_FILE_DISCONNECT_REPLY;
821 return ret;
822 }
823
824 list_move_tail(&cb->list, &dev->ctrl_rd_list);
825 cl->timer_count = MEI_CONNECT_TIMEOUT;
826 mei_schedule_stall_timer(dev);
827
828 return 0;
829 }
830
831 /**
832 * mei_cl_irq_disconnect - processes close related operation from
833 * interrupt thread context - send disconnect request
834 *
835 * @cl: client
836 * @cb: callback block.
837 * @cmpl_list: complete list.
838 *
839 * Return: 0, OK; otherwise, error.
840 */
841 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
842 struct list_head *cmpl_list)
843 {
844 struct mei_device *dev = cl->dev;
845 u32 msg_slots;
846 int slots;
847 int ret;
848
849 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
850 slots = mei_hbuf_empty_slots(dev);
851 if (slots < 0)
852 return -EOVERFLOW;
853
854 if ((u32)slots < msg_slots)
855 return -EMSGSIZE;
856
857 ret = mei_cl_send_disconnect(cl, cb);
858 if (ret)
859 list_move_tail(&cb->list, cmpl_list);
860
861 return ret;
862 }
863
864 /**
865  * __mei_cl_disconnect - disconnect host client from the me one;
866  * internal function, runtime pm has to be already acquired
867 *
868 * @cl: host client
869 *
870 * Return: 0 on success, <0 on failure.
871 */
872 static int __mei_cl_disconnect(struct mei_cl *cl)
873 {
874 struct mei_device *dev;
875 struct mei_cl_cb *cb;
876 int rets;
877
878 dev = cl->dev;
879
880 cl->state = MEI_FILE_DISCONNECTING;
881
882 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
883 if (!cb) {
884 rets = -ENOMEM;
885 goto out;
886 }
887
888 if (mei_hbuf_acquire(dev)) {
889 rets = mei_cl_send_disconnect(cl, cb);
890 if (rets) {
891 cl_err(dev, cl, "failed to disconnect.\n");
892 goto out;
893 }
894 }
895
896 mutex_unlock(&dev->device_lock);
897 wait_event_timeout(cl->wait,
898 cl->state == MEI_FILE_DISCONNECT_REPLY ||
899 cl->state == MEI_FILE_DISCONNECTED,
900 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
901 mutex_lock(&dev->device_lock);
902
903 rets = cl->status;
904 if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
905 cl->state != MEI_FILE_DISCONNECTED) {
906 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
907 rets = -ETIME;
908 }
909
910 out:
911 /* we disconnect also on error */
912 mei_cl_set_disconnected(cl);
913 if (!rets)
914 cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
915
916 mei_io_cb_free(cb);
917 return rets;
918 }
919
920 /**
921 * mei_cl_disconnect - disconnect host client from the me one
922 *
923 * @cl: host client
924 *
925 * Locking: called under "dev->device_lock" lock
926 *
927 * Return: 0 on success, <0 on failure.
928 */
929 int mei_cl_disconnect(struct mei_cl *cl)
930 {
931 struct mei_device *dev;
932 int rets;
933
934 if (WARN_ON(!cl || !cl->dev))
935 return -ENODEV;
936
937 dev = cl->dev;
938
939 cl_dbg(dev, cl, "disconnecting");
940
941 if (!mei_cl_is_connected(cl))
942 return 0;
943
944 if (mei_cl_is_fixed_address(cl)) {
945 mei_cl_set_disconnected(cl);
946 return 0;
947 }
948
949 if (dev->dev_state == MEI_DEV_POWER_DOWN) {
950 cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
951 mei_cl_set_disconnected(cl);
952 return 0;
953 }
954
955 rets = pm_runtime_get(dev->dev);
956 if (rets < 0 && rets != -EINPROGRESS) {
957 pm_runtime_put_noidle(dev->dev);
958 cl_err(dev, cl, "rpm: get failed %d\n", rets);
959 return rets;
960 }
961
962 rets = __mei_cl_disconnect(cl);
963
964 cl_dbg(dev, cl, "rpm: autosuspend\n");
965 pm_runtime_mark_last_busy(dev->dev);
966 pm_runtime_put_autosuspend(dev->dev);
967
968 return rets;
969 }
970
971
972 /**
973 * mei_cl_is_other_connecting - checks if other
974 * client with the same me client id is connecting
975 *
976 * @cl: private data of the file object
977 *
978  * Return: true if other client is connecting, false - otherwise.
979 */
980 static bool mei_cl_is_other_connecting(struct mei_cl *cl)
981 {
982 struct mei_device *dev;
983 struct mei_cl_cb *cb;
984
985 dev = cl->dev;
986
987 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
988 if (cb->fop_type == MEI_FOP_CONNECT &&
989 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
990 return true;
991 }
992
993 return false;
994 }
995
996 /**
997 * mei_cl_send_connect - send connect request
998 *
999 * @cl: host client
1000 * @cb: callback block
1001 *
1002 * Return: 0, OK; otherwise, error.
1003 */
1004 static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1005 {
1006 struct mei_device *dev;
1007 int ret;
1008
1009 dev = cl->dev;
1010
1011 ret = mei_hbm_cl_connect_req(dev, cl);
1012 cl->status = ret;
1013 if (ret) {
1014 cl->state = MEI_FILE_DISCONNECT_REPLY;
1015 return ret;
1016 }
1017
1018 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1019 cl->timer_count = MEI_CONNECT_TIMEOUT;
1020 mei_schedule_stall_timer(dev);
1021 return 0;
1022 }
1023
1024 /**
1025 * mei_cl_irq_connect - send connect request in irq_thread context
1026 *
1027 * @cl: host client
1028 * @cb: callback block
1029 * @cmpl_list: complete list
1030 *
1031 * Return: 0, OK; otherwise, error.
1032 */
1033 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1034 struct list_head *cmpl_list)
1035 {
1036 struct mei_device *dev = cl->dev;
1037 u32 msg_slots;
1038 int slots;
1039 int rets;
1040
1041 if (mei_cl_is_other_connecting(cl))
1042 return 0;
1043
1044 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1045 slots = mei_hbuf_empty_slots(dev);
1046 if (slots < 0)
1047 return -EOVERFLOW;
1048
1049 if ((u32)slots < msg_slots)
1050 return -EMSGSIZE;
1051
1052 rets = mei_cl_send_connect(cl, cb);
1053 if (rets)
1054 list_move_tail(&cb->list, cmpl_list);
1055
1056 return rets;
1057 }
1058
1059 /**
1060 * mei_cl_connect - connect host client to the me one
1061 *
1062 * @cl: host client
1063 * @me_cl: me client
1064 * @fp: pointer to file structure
1065 *
1066 * Locking: called under "dev->device_lock" lock
1067 *
1068 * Return: 0 on success, <0 on failure.
1069 */
1070 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1071 const struct file *fp)
1072 {
1073 struct mei_device *dev;
1074 struct mei_cl_cb *cb;
1075 int rets;
1076
1077 if (WARN_ON(!cl || !cl->dev || !me_cl))
1078 return -ENODEV;
1079
1080 dev = cl->dev;
1081
1082 rets = mei_cl_set_connecting(cl, me_cl);
1083 if (rets)
1084 goto nortpm;
1085
1086 if (mei_cl_is_fixed_address(cl)) {
1087 cl->state = MEI_FILE_CONNECTED;
1088 rets = 0;
1089 goto nortpm;
1090 }
1091
1092 rets = pm_runtime_get(dev->dev);
1093 if (rets < 0 && rets != -EINPROGRESS) {
1094 pm_runtime_put_noidle(dev->dev);
1095 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1096 goto nortpm;
1097 }
1098
1099 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
1100 if (!cb) {
1101 rets = -ENOMEM;
1102 goto out;
1103 }
1104
1105 /* run hbuf acquire last so we don't have to undo */
1106 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
1107 rets = mei_cl_send_connect(cl, cb);
1108 if (rets)
1109 goto out;
1110 }
1111
1112 mutex_unlock(&dev->device_lock);
1113 wait_event_timeout(cl->wait,
1114 (cl->state == MEI_FILE_CONNECTED ||
1115 cl->state == MEI_FILE_DISCONNECTED ||
1116 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
1117 cl->state == MEI_FILE_DISCONNECT_REPLY),
1118 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1119 mutex_lock(&dev->device_lock);
1120
1121 if (!mei_cl_is_connected(cl)) {
1122 if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
1123 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
1124 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
1125 			/* ignore disconnect return value;
1126 * in case of failure reset will be invoked
1127 */
1128 __mei_cl_disconnect(cl);
1129 rets = -EFAULT;
1130 goto out;
1131 }
1132
1133 /* timeout or something went really wrong */
1134 if (!cl->status)
1135 cl->status = -EFAULT;
1136 }
1137
1138 rets = cl->status;
1139 out:
1140 cl_dbg(dev, cl, "rpm: autosuspend\n");
1141 pm_runtime_mark_last_busy(dev->dev);
1142 pm_runtime_put_autosuspend(dev->dev);
1143
1144 mei_io_cb_free(cb);
1145
1146 nortpm:
1147 if (!mei_cl_is_connected(cl))
1148 mei_cl_set_disconnected(cl);
1149
1150 return rets;
1151 }
1152
1153 /**
1154 * mei_cl_alloc_linked - allocate and link host client
1155 *
1156 * @dev: the device structure
1157 *
1158  * Return: cl on success, ERR_PTR on failure
1159 */
1160 struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
1161 {
1162 struct mei_cl *cl;
1163 int ret;
1164
1165 cl = mei_cl_allocate(dev);
1166 if (!cl) {
1167 ret = -ENOMEM;
1168 goto err;
1169 }
1170
1171 ret = mei_cl_link(cl);
1172 if (ret)
1173 goto err;
1174
1175 return cl;
1176 err:
1177 kfree(cl);
1178 return ERR_PTR(ret);
1179 }
1180
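/*
 * Illustrative sketch, not part of the driver: a typical connect
 * sequence, roughly what the char device layer does. Assumes
 * dev->device_lock is held and "uuid" is a caller-supplied client uuid.
 *
 *	struct mei_me_client *me_cl;
 *	struct mei_cl *cl;
 *	int ret;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *
 *	cl = mei_cl_alloc_linked(dev);
 *	if (IS_ERR(cl)) {
 *		mei_me_cl_put(me_cl);
 *		return PTR_ERR(cl);
 *	}
 *
 *	ret = mei_cl_connect(cl, me_cl, NULL);
 *	mei_me_cl_put(me_cl);	(connect took its own reference)
 */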
1181 /**
1182 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1183 *
1184 * @cl: host client
1185 *
1186 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
1187 */
1188 static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1189 {
1190 if (WARN_ON(!cl || !cl->me_cl))
1191 return -EINVAL;
1192
1193 if (cl->tx_flow_ctrl_creds > 0)
1194 return 1;
1195
1196 if (mei_cl_is_fixed_address(cl))
1197 return 1;
1198
1199 if (mei_cl_is_single_recv_buf(cl)) {
1200 if (cl->me_cl->tx_flow_ctrl_creds > 0)
1201 return 1;
1202 }
1203 return 0;
1204 }
1205
1206 /**
1207 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1208 * for a client
1209 *
1210 * @cl: host client
1211 *
1212 * Return:
1213 * 0 on success
1214 * -EINVAL when ctrl credits are <= 0
1215 */
1216 static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1217 {
1218 if (WARN_ON(!cl || !cl->me_cl))
1219 return -EINVAL;
1220
1221 if (mei_cl_is_fixed_address(cl))
1222 return 0;
1223
1224 if (mei_cl_is_single_recv_buf(cl)) {
1225 if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1226 return -EINVAL;
1227 cl->me_cl->tx_flow_ctrl_creds--;
1228 } else {
1229 if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1230 return -EINVAL;
1231 cl->tx_flow_ctrl_creds--;
1232 }
1233 return 0;
1234 }
1235
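/*
 * Note (summary of the two helpers above, not from the original source):
 * flow control is one credit per in-flight message. A send is allowed
 * only while mei_cl_tx_flow_ctrl_creds() returns 1; the credit is
 * consumed by mei_cl_tx_flow_ctrl_creds_reduce() on the first chunk and
 * granted back by a firmware flow control message. Clients with a single
 * receive buffer share the credit on the me client rather than keeping
 * one per host client.
 */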
1236 /**
1237 * mei_cl_notify_fop2req - convert fop to proper request
1238 *
1239  * @fop: notification start/stop file operation type
1240 *
1241 * Return: MEI_HBM_NOTIFICATION_START/STOP
1242 */
1243 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1244 {
1245 if (fop == MEI_FOP_NOTIFY_START)
1246 return MEI_HBM_NOTIFICATION_START;
1247 else
1248 return MEI_HBM_NOTIFICATION_STOP;
1249 }
1250
1251 /**
1252  * mei_cl_notify_req2fop - convert notification request to file operation type
1253 *
1254 * @req: hbm notification request type
1255 *
1256 * Return: MEI_FOP_NOTIFY_START/STOP
1257 */
1258 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1259 {
1260 if (req == MEI_HBM_NOTIFICATION_START)
1261 return MEI_FOP_NOTIFY_START;
1262 else
1263 return MEI_FOP_NOTIFY_STOP;
1264 }
1265
1266 /**
1267 * mei_cl_irq_notify - send notification request in irq_thread context
1268 *
1269 * @cl: client
1270 * @cb: callback block.
1271 * @cmpl_list: complete list.
1272 *
1273  * Return: 0 on success and error otherwise.
1274 */
1275 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1276 struct list_head *cmpl_list)
1277 {
1278 struct mei_device *dev = cl->dev;
1279 u32 msg_slots;
1280 int slots;
1281 int ret;
1282 bool request;
1283
1284 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1285 slots = mei_hbuf_empty_slots(dev);
1286 if (slots < 0)
1287 return -EOVERFLOW;
1288
1289 if ((u32)slots < msg_slots)
1290 return -EMSGSIZE;
1291
1292 request = mei_cl_notify_fop2req(cb->fop_type);
1293 ret = mei_hbm_cl_notify_req(dev, cl, request);
1294 if (ret) {
1295 cl->status = ret;
1296 list_move_tail(&cb->list, cmpl_list);
1297 return ret;
1298 }
1299
1300 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1301 return 0;
1302 }
1303
1304 /**
1305 * mei_cl_notify_request - send notification stop/start request
1306 *
1307 * @cl: host client
1308 * @fp: associate request with file
1309 * @request: 1 for start or 0 for stop
1310 *
1311 * Locking: called under "dev->device_lock" lock
1312 *
1313  * Return: 0 on success and error otherwise.
1314 */
1315 int mei_cl_notify_request(struct mei_cl *cl,
1316 const struct file *fp, u8 request)
1317 {
1318 struct mei_device *dev;
1319 struct mei_cl_cb *cb;
1320 enum mei_cb_file_ops fop_type;
1321 int rets;
1322
1323 if (WARN_ON(!cl || !cl->dev))
1324 return -ENODEV;
1325
1326 dev = cl->dev;
1327
1328 if (!dev->hbm_f_ev_supported) {
1329 cl_dbg(dev, cl, "notifications not supported\n");
1330 return -EOPNOTSUPP;
1331 }
1332
1333 if (!mei_cl_is_connected(cl))
1334 return -ENODEV;
1335
1336 rets = pm_runtime_get(dev->dev);
1337 if (rets < 0 && rets != -EINPROGRESS) {
1338 pm_runtime_put_noidle(dev->dev);
1339 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1340 return rets;
1341 }
1342
1343 fop_type = mei_cl_notify_req2fop(request);
1344 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1345 if (!cb) {
1346 rets = -ENOMEM;
1347 goto out;
1348 }
1349
1350 if (mei_hbuf_acquire(dev)) {
1351 if (mei_hbm_cl_notify_req(dev, cl, request)) {
1352 rets = -ENODEV;
1353 goto out;
1354 }
1355 list_move_tail(&cb->list, &dev->ctrl_rd_list);
1356 }
1357
1358 mutex_unlock(&dev->device_lock);
1359 wait_event_timeout(cl->wait,
1360 cl->notify_en == request ||
1361 cl->status ||
1362 !mei_cl_is_connected(cl),
1363 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1364 mutex_lock(&dev->device_lock);
1365
1366 if (cl->notify_en != request && !cl->status)
1367 cl->status = -EFAULT;
1368
1369 rets = cl->status;
1370
1371 out:
1372 cl_dbg(dev, cl, "rpm: autosuspend\n");
1373 pm_runtime_mark_last_busy(dev->dev);
1374 pm_runtime_put_autosuspend(dev->dev);
1375
1376 mei_io_cb_free(cb);
1377 return rets;
1378 }
1379
1380 /**
1381 * mei_cl_notify - raise notification
1382 *
1383 * @cl: host client
1384 *
1385 * Locking: called under "dev->device_lock" lock
1386 */
1387 void mei_cl_notify(struct mei_cl *cl)
1388 {
1389 struct mei_device *dev;
1390
1391 if (!cl || !cl->dev)
1392 return;
1393
1394 dev = cl->dev;
1395
1396 if (!cl->notify_en)
1397 return;
1398
1399 cl_dbg(dev, cl, "notify event");
1400 cl->notify_ev = true;
1401 if (!mei_cl_bus_notify_event(cl))
1402 wake_up_interruptible(&cl->ev_wait);
1403
1404 if (cl->ev_async)
1405 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1406
1407 }
1408
1409 /**
1410 * mei_cl_notify_get - get or wait for notification event
1411 *
1412 * @cl: host client
1413 * @block: this request is blocking
1414 * @notify_ev: true if notification event was received
1415 *
1416 * Locking: called under "dev->device_lock" lock
1417 *
1418  * Return: 0 on success and error otherwise.
1419 */
1420 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1421 {
1422 struct mei_device *dev;
1423 int rets;
1424
1425 *notify_ev = false;
1426
1427 if (WARN_ON(!cl || !cl->dev))
1428 return -ENODEV;
1429
1430 dev = cl->dev;
1431
1432 if (!dev->hbm_f_ev_supported) {
1433 cl_dbg(dev, cl, "notifications not supported\n");
1434 return -EOPNOTSUPP;
1435 }
1436
1437 if (!mei_cl_is_connected(cl))
1438 return -ENODEV;
1439
1440 if (cl->notify_ev)
1441 goto out;
1442
1443 if (!block)
1444 return -EAGAIN;
1445
1446 mutex_unlock(&dev->device_lock);
1447 rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1448 mutex_lock(&dev->device_lock);
1449
1450 if (rets < 0)
1451 return rets;
1452
1453 out:
1454 *notify_ev = cl->notify_ev;
1455 cl->notify_ev = false;
1456 return 0;
1457 }
1458
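/*
 * Illustrative sketch, not part of the driver: non-blocking poll for a
 * notification event, as an ioctl handler might issue it. Assumes
 * dev->device_lock is held.
 *
 *	bool notify_ev;
 *	int ret;
 *
 *	ret = mei_cl_notify_get(cl, false, &notify_ev);
 *	if (ret == -EAGAIN)
 *		return ret;
 *	... notify_ev now reports whether an event was pending ...
 */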
1459 /**
1460  * mei_cl_read_start - start reading a client message
1461 *
1462 * @cl: host client
1463 * @length: number of bytes to read
1464 * @fp: pointer to file structure
1465 *
1466 * Return: 0 on success, <0 on failure.
1467 */
1468 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
1469 {
1470 struct mei_device *dev;
1471 struct mei_cl_cb *cb;
1472 int rets;
1473
1474 if (WARN_ON(!cl || !cl->dev))
1475 return -ENODEV;
1476
1477 dev = cl->dev;
1478
1479 if (!mei_cl_is_connected(cl))
1480 return -ENODEV;
1481
1482 if (!mei_me_cl_is_active(cl->me_cl)) {
1483 cl_err(dev, cl, "no such me client\n");
1484 return -ENOTTY;
1485 }
1486
1487 if (mei_cl_is_fixed_address(cl))
1488 return 0;
1489
1490 /* HW currently supports only one pending read */
1491 if (cl->rx_flow_ctrl_creds)
1492 return -EBUSY;
1493
1494 cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
1495 if (!cb)
1496 return -ENOMEM;
1497
1498 rets = pm_runtime_get(dev->dev);
1499 if (rets < 0 && rets != -EINPROGRESS) {
1500 pm_runtime_put_noidle(dev->dev);
1501 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1502 goto nortpm;
1503 }
1504
1505 rets = 0;
1506 if (mei_hbuf_acquire(dev)) {
1507 rets = mei_hbm_cl_flow_control_req(dev, cl);
1508 if (rets < 0)
1509 goto out;
1510
1511 list_move_tail(&cb->list, &cl->rd_pending);
1512 }
1513 cl->rx_flow_ctrl_creds++;
1514
1515 out:
1516 cl_dbg(dev, cl, "rpm: autosuspend\n");
1517 pm_runtime_mark_last_busy(dev->dev);
1518 pm_runtime_put_autosuspend(dev->dev);
1519 nortpm:
1520 if (rets)
1521 mei_io_cb_free(cb);
1522
1523 return rets;
1524 }
1525
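/*
 * Illustrative sketch, not part of the driver: a simplified blocking
 * read along the lines of the char device flow. Assumes dev->device_lock
 * is held around the calls and "fp" is the reader's file pointer.
 *
 *	ret = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
 *	if (ret && ret != -EBUSY)
 *		return ret;
 *	... wait on cl->rx_wait until mei_cl_read_cb(cl, fp) != NULL ...
 *	cb = mei_cl_read_cb(cl, fp);
 *	if (cb) {
 *		... copy cb->buf_idx bytes from cb->buf.data to the caller ...
 *		mei_io_cb_free(cb);
 *	}
 */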
1526 /**
1527 * mei_msg_hdr_init - initialize mei message header
1528 *
1529 * @mei_hdr: mei message header
1530 * @cb: message callback structure
1531 */
1532 static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
1533 {
1534 mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1535 mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1536 mei_hdr->length = 0;
1537 mei_hdr->reserved = 0;
1538 mei_hdr->msg_complete = 0;
1539 mei_hdr->dma_ring = 0;
1540 mei_hdr->internal = cb->internal;
1541 }
1542
1543 /**
1544 * mei_cl_irq_write - write a message to device
1545 * from the interrupt thread context
1546 *
1547 * @cl: client
1548 * @cb: callback block.
1549 * @cmpl_list: complete list.
1550 *
1551 * Return: 0, OK; otherwise error.
1552 */
1553 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1554 struct list_head *cmpl_list)
1555 {
1556 struct mei_device *dev;
1557 struct mei_msg_data *buf;
1558 struct mei_msg_hdr mei_hdr;
1559 size_t hdr_len = sizeof(mei_hdr);
1560 size_t len;
1561 size_t hbuf_len, dr_len;
1562 int hbuf_slots;
1563 u32 dr_slots;
1564 u32 dma_len;
1565 int rets;
1566 bool first_chunk;
1567 const void *data;
1568
1569 if (WARN_ON(!cl || !cl->dev))
1570 return -ENODEV;
1571
1572 dev = cl->dev;
1573
1574 buf = &cb->buf;
1575
1576 first_chunk = cb->buf_idx == 0;
1577
1578 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1579 if (rets < 0)
1580 goto err;
1581
1582 if (rets == 0) {
1583 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1584 return 0;
1585 }
1586
1587 len = buf->size - cb->buf_idx;
1588 data = buf->data + cb->buf_idx;
1589 hbuf_slots = mei_hbuf_empty_slots(dev);
1590 if (hbuf_slots < 0) {
1591 rets = -EOVERFLOW;
1592 goto err;
1593 }
1594
1595 hbuf_len = mei_slots2data(hbuf_slots);
1596 dr_slots = mei_dma_ring_empty_slots(dev);
1597 dr_len = mei_slots2data(dr_slots);
1598
1599 mei_msg_hdr_init(&mei_hdr, cb);
1600
1601 	/*
1602 	 * Split the message only if we can write the whole host buffer,
1603 	 * otherwise wait for next time the host buffer is empty.
1604 */
1605 if (len + hdr_len <= hbuf_len) {
1606 mei_hdr.length = len;
1607 mei_hdr.msg_complete = 1;
1608 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1609 mei_hdr.dma_ring = 1;
1610 if (len > dr_len)
1611 len = dr_len;
1612 else
1613 mei_hdr.msg_complete = 1;
1614
1615 mei_hdr.length = sizeof(dma_len);
1616 dma_len = len;
1617 data = &dma_len;
1618 } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
1619 len = hbuf_len - hdr_len;
1620 mei_hdr.length = len;
1621 } else {
1622 return 0;
1623 }
1624
1625 if (mei_hdr.dma_ring)
1626 mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);
1627
1628 rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
1629 if (rets)
1630 goto err;
1631
1632 cl->status = 0;
1633 cl->writing_state = MEI_WRITING;
1634 cb->buf_idx += len;
1635
1636 if (first_chunk) {
1637 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1638 rets = -EIO;
1639 goto err;
1640 }
1641 }
1642
1643 if (mei_hdr.msg_complete)
1644 list_move_tail(&cb->list, &dev->write_waiting_list);
1645
1646 return 0;
1647
1648 err:
1649 cl->status = rets;
1650 list_move_tail(&cb->list, cmpl_list);
1651 return rets;
1652 }
1653
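/*
 * Note (summary of the function above, not from the original source):
 * when the DMA ring is used, only the 4-byte payload length travels
 * through the host buffer (mei_hdr.length = sizeof(dma_len)); the payload
 * itself is copied into the ring by mei_dma_ring_write() and the header's
 * dma_ring bit tells the firmware to fetch it from there.
 */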
1654 /**
1655 * mei_cl_write - submit a write cb to mei device
1656 * assumes device_lock is locked
1657 *
1658 * @cl: host client
1659 * @cb: write callback with filled data
1660 *
1661 * Return: number of bytes sent on success, <0 on failure.
1662 */
1663 ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1664 {
1665 struct mei_device *dev;
1666 struct mei_msg_data *buf;
1667 struct mei_msg_hdr mei_hdr;
1668 size_t hdr_len = sizeof(mei_hdr);
1669 size_t len, hbuf_len, dr_len;
1670 int hbuf_slots;
1671 u32 dr_slots;
1672 u32 dma_len;
1673 ssize_t rets;
1674 bool blocking;
1675 const void *data;
1676
1677 if (WARN_ON(!cl || !cl->dev))
1678 return -ENODEV;
1679
1680 if (WARN_ON(!cb))
1681 return -EINVAL;
1682
1683 dev = cl->dev;
1684
1685 buf = &cb->buf;
1686 len = buf->size;
1687
1688 cl_dbg(dev, cl, "len=%zd\n", len);
1689
1690 blocking = cb->blocking;
1691 data = buf->data;
1692
1693 rets = pm_runtime_get(dev->dev);
1694 if (rets < 0 && rets != -EINPROGRESS) {
1695 pm_runtime_put_noidle(dev->dev);
1696 cl_err(dev, cl, "rpm: get failed %zd\n", rets);
1697 goto free;
1698 }
1699
1700 cb->buf_idx = 0;
1701 cl->writing_state = MEI_IDLE;
1702
1703
1704 rets = mei_cl_tx_flow_ctrl_creds(cl);
1705 if (rets < 0)
1706 goto err;
1707
1708 mei_msg_hdr_init(&mei_hdr, cb);
1709
1710 if (rets == 0) {
1711 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1712 rets = len;
1713 goto out;
1714 }
1715
1716 if (!mei_hbuf_acquire(dev)) {
1717 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
1718 rets = len;
1719 goto out;
1720 }
1721
1722 hbuf_slots = mei_hbuf_empty_slots(dev);
1723 if (hbuf_slots < 0) {
1724 rets = -EOVERFLOW;
1725 goto out;
1726 }
1727
1728 hbuf_len = mei_slots2data(hbuf_slots);
1729 dr_slots = mei_dma_ring_empty_slots(dev);
1730 dr_len = mei_slots2data(dr_slots);
1731
1732 if (len + hdr_len <= hbuf_len) {
1733 mei_hdr.length = len;
1734 mei_hdr.msg_complete = 1;
1735 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1736 mei_hdr.dma_ring = 1;
1737 if (len > dr_len)
1738 len = dr_len;
1739 else
1740 mei_hdr.msg_complete = 1;
1741
1742 mei_hdr.length = sizeof(dma_len);
1743 dma_len = len;
1744 data = &dma_len;
1745 } else {
1746 len = hbuf_len - hdr_len;
1747 mei_hdr.length = len;
1748 }
1749
1750 if (mei_hdr.dma_ring)
1751 mei_dma_ring_write(dev, buf->data, len);
1752
1753 rets = mei_write_message(dev, &mei_hdr, hdr_len,
1754 data, mei_hdr.length);
1755 if (rets)
1756 goto err;
1757
1758 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
1759 if (rets)
1760 goto err;
1761
1762 cl->writing_state = MEI_WRITING;
1763 cb->buf_idx = len;
1764 /* restore return value */
1765 len = buf->size;
1766
1767 out:
1768 if (mei_hdr.msg_complete)
1769 mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
1770 else
1771 mei_tx_cb_enqueue(cb, &dev->write_list);
1772
1773 cb = NULL;
1774 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
1775
1776 mutex_unlock(&dev->device_lock);
1777 rets = wait_event_interruptible(cl->tx_wait,
1778 cl->writing_state == MEI_WRITE_COMPLETE ||
1779 (!mei_cl_is_connected(cl)));
1780 mutex_lock(&dev->device_lock);
1781 /* wait_event_interruptible returns -ERESTARTSYS */
1782 if (rets) {
1783 if (signal_pending(current))
1784 rets = -EINTR;
1785 goto err;
1786 }
1787 if (cl->writing_state != MEI_WRITE_COMPLETE) {
1788 rets = -EFAULT;
1789 goto err;
1790 }
1791 }
1792
1793 rets = len;
1794 err:
1795 cl_dbg(dev, cl, "rpm: autosuspend\n");
1796 pm_runtime_mark_last_busy(dev->dev);
1797 pm_runtime_put_autosuspend(dev->dev);
1798 free:
1799 mei_io_cb_free(cb);
1800
1801 return rets;
1802 }
1803
1804
1805 /**
1806 * mei_cl_complete - processes completed operation for a client
1807 *
1808 * @cl: private data of the file object.
1809 * @cb: callback block.
1810 */
1811 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1812 {
1813 struct mei_device *dev = cl->dev;
1814
1815 switch (cb->fop_type) {
1816 case MEI_FOP_WRITE:
1817 mei_tx_cb_dequeue(cb);
1818 cl->writing_state = MEI_WRITE_COMPLETE;
1819 if (waitqueue_active(&cl->tx_wait)) {
1820 wake_up_interruptible(&cl->tx_wait);
1821 } else {
1822 pm_runtime_mark_last_busy(dev->dev);
1823 pm_request_autosuspend(dev->dev);
1824 }
1825 break;
1826
1827 case MEI_FOP_READ:
1828 list_add_tail(&cb->list, &cl->rd_completed);
1829 if (!mei_cl_is_fixed_address(cl) &&
1830 !WARN_ON(!cl->rx_flow_ctrl_creds))
1831 cl->rx_flow_ctrl_creds--;
1832 if (!mei_cl_bus_rx_event(cl))
1833 wake_up_interruptible(&cl->rx_wait);
1834 break;
1835
1836 case MEI_FOP_CONNECT:
1837 case MEI_FOP_DISCONNECT:
1838 case MEI_FOP_NOTIFY_STOP:
1839 case MEI_FOP_NOTIFY_START:
1840 if (waitqueue_active(&cl->wait))
1841 wake_up(&cl->wait);
1842
1843 break;
1844 case MEI_FOP_DISCONNECT_RSP:
1845 mei_io_cb_free(cb);
1846 mei_cl_set_disconnected(cl);
1847 break;
1848 default:
1849 		BUG(); /* unreachable: all fop types are handled above */
1850 }
1851 }
1852
1853
1854 /**
1855 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1856 *
1857 * @dev: mei device
1858 */
1859 void mei_cl_all_disconnect(struct mei_device *dev)
1860 {
1861 struct mei_cl *cl;
1862
1863 list_for_each_entry(cl, &dev->file_list, link)
1864 mei_cl_set_disconnected(cl);
1865 }