drivers/hid/intel-ish-hid/ishtp/client.c
/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "hbm.h"
#include "client.h"
int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
{
	unsigned long tx_free_flags;
	int size;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);

	return size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);

int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
{
	return cl->tx_ring_free_size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
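
/*
 * Example (illustrative sketch, not part of this file): a client driver
 * can use the two helpers above to apply backpressure before queueing a
 * message. The wrapper name below is hypothetical.
 *
 *	static int my_cl_try_send(struct ishtp_cl *cl, void *buf, size_t len)
 *	{
 *		if (ishtp_cl_get_tx_free_buffer_size(cl) < len)
 *			return -EAGAIN;	// no tx room, retry later
 *		return ishtp_cl_send(cl, buf, len);
 *	}
 */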

/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Used to remove all entries from read queue for a client
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}

/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Used to remove all queues for a client. This is called when a client device
 * needs reset due to error, S3 resume or during module removal
 *
 * Return: 0 on success else -EINVAL if device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	ishtp_read_list_flush(cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);
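
/*
 * Example (illustrative sketch, not part of this file): a client driver's
 * reset path is expected to flush the per-client queues before tearing
 * down or re-establishing the connection. The function name below is
 * hypothetical.
 *
 *	static void my_cl_reset_handler(struct ishtp_cl *cl)
 *	{
 *		ishtp_cl_flush_queues(cl);
 *		// ... then disconnect/unlink/free, or reconnect
 *	}
 */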

/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initializes a client device's fields: spinlocks, queues etc.
 * This function is called during new client creation
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
	cl->tx_ring_free_size = cl->tx_ring_size;

	/* dma */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}

/**
 * ishtp_cl_allocate() - allocate client structure and set it up
 * @cl_device: ishtp client device for which this client is being created
 *
 * Allocate memory for a new client device and initialize each field.
 *
 * Return: The allocated client instance or NULL on failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl;

	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	ishtp_cl_init(cl, cl_device->ishtp_dev);
	return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);

/**
 * ishtp_cl_free() - Frees a client device
 * @cl: client device instance
 *
 * Frees a client device
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);

/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 *
 * This allocates a single bit in the hostmap, and so bounds the number of
 * client sessions that can be open at the same time. Once a host id is
 * allocated, the client instance is added to the ishtp device's client
 * list.
 *
 * Return: 0 or error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int id, ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);

	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

	id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);
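
/*
 * Example (illustrative sketch, not part of this file): typical client
 * bring-up from a bus driver's probe path. The probe function name is
 * hypothetical.
 *
 *	static int my_driver_probe(struct ishtp_cl_device *cl_device)
 *	{
 *		struct ishtp_cl *cl;
 *		int rv;
 *
 *		cl = ishtp_cl_allocate(cl_device);
 *		if (!cl)
 *			return -ENOMEM;
 *
 *		rv = ishtp_cl_link(cl);
 *		if (rv) {
 *			ishtp_cl_free(cl);
 *			return rv;
 *		}
 *		// ... set fw client id and connect (see ishtp_cl_connect())
 *		return 0;
 *	}
 */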

/**
 * ishtp_cl_unlink() - remove fw_cl from the client device list
 * @cl: client device instance
 *
 * Remove a client previously linked to an ishtp device
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * This checks that 'cl' is actually linked into the device's
	 * structure before attempting 'list_del'
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);

/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for a client to firmware.
 *
 * Return: 0 if successful disconnect response from the firmware or error
 * code on failure
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If FW reset arrived, this will happen. Don't dereference 'cl'
	 * fields, as 'cl' may already be freed
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);
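
/*
 * Example (illustrative sketch, not part of this file): orderly teardown
 * from a bus driver's remove path. The caller must move the client to
 * ISHTP_CL_DISCONNECTING first, otherwise ishtp_cl_disconnect() above
 * returns 0 without sending anything. The function name is hypothetical.
 *
 *	static void my_driver_remove(struct ishtp_cl *cl)
 *	{
 *		ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
 *		ishtp_cl_disconnect(cl);
 *		ishtp_cl_unlink(cl);
 *		ishtp_cl_flush_queues(cl);
 *		ishtp_cl_free(cl);
 *	}
 */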

/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is connecting
 *
 * Return: true if another client is connecting else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link) {
		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
				cl->fw_client_id == pos->fw_client_id) {
			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);

	return false;
}

/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful, it will
 * allocate RX and TX ring buffers
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
				(dev->dev_state == ISHTP_DEV_ENABLED &&
				(cl->state == ISHTP_CL_CONNECTED ||
				 cl->state == ISHTP_CL_DISCONNECTED)),
				ishtp_secs_to_jiffies(
					ISHTP_CL_CONNECT_TIMEOUT));
	/*
	 * If FW reset arrived, this will happen. Don't dereference 'cl'
	 * fields, as 'cl' may already be freed
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			       __func__);
		return -EFAULT;
	}

	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/* Upon successful connection and allocation, emit flow-control */
	rets = ishtp_cl_read_start(cl);

	dev->print_log(dev, "%s() successful\n", __func__);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
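
/*
 * Example (illustrative sketch, not part of this file): connecting a
 * freshly linked client to a firmware client. The UUID variable is
 * hypothetical; ishtp_fw_cl_get_client() and ishtp_get_fw_client_id()
 * are the bus helpers a real driver would use to resolve it.
 *
 *	struct ishtp_fw_client *fw_client;
 *
 *	fw_client = ishtp_fw_cl_get_client(dev, &my_client_uuid);
 *	if (!fw_client)
 *		return -ENOENT;
 *	ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
 *	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);
 *	rv = ishtp_cl_connect(cl);
 */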

/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers, add it to the
 * device read list, then send a flow control request to firmware so it
 * may send the next message.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* The current rb is the head of the free rb list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			 "[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be BEFORE sending flow control -
	 * response in ISR may come too fast...
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}
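
/*
 * Example (illustrative sketch, not part of this file): a client driver's
 * Rx event handler drains completed buffers and recycles each one, which
 * re-arms the read and re-issues flow control. The callback name,
 * process_msg() and my_cl are hypothetical; ishtp_cl_rx_get_rb() and
 * ishtp_cl_io_rb_recycle() are the ring-buffer helpers from
 * client-buffers.c.
 *
 *	static void my_cl_event_cb(struct ishtp_cl_device *cl_device)
 *	{
 *		struct ishtp_cl_rb *rb;
 *
 *		while ((rb = ishtp_cl_rx_get_rb(my_cl)) != NULL) {
 *			process_msg(rb->buffer.data, rb->buf_idx);
 *			ishtp_cl_io_rb_recycle(rb);
 *		}
 *	}
 */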

/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function
 * gets a free buffer from the tx ring, copies the message data into it and
 * queues the message for sending via ishtp_cl_send_msg()
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check if we have fw client device */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* No free bufs */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
				  struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
		return -EIO;
		/* Should not happen, as free list is pre-allocated */
	}
	/*
	 * This is safe, as 'length' is already checked for not exceeding
	 * max ISHTP message size per client
	 */
	list_del_init(&cl_msg->list);
	--cl->tx_ring_free_size;

	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);
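
/*
 * Example (illustrative sketch, not part of this file): error handling a
 * caller might apply around ishtp_cl_send(). -ENOMEM means the tx ring is
 * momentarily full, so retrying later is reasonable; the other codes are
 * not transient. The helper names below are hypothetical.
 *
 *	rv = ishtp_cl_send(cl, (uint8_t *)&req, sizeof(req));
 *	switch (rv) {
 *	case 0:
 *		break;			// queued (or already sent)
 *	case -ENOMEM:
 *		schedule_retry();	// tx ring full, back off and retry
 *		break;
 *	default:
 *		handle_fatal(rv);	// -EPIPE/-ENODEV/-ENOENT/-EMSGSIZE
 *	}
 */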

/**
 * ishtp_cl_read_complete() - read complete
 * @rb: Pointer to client request block
 *
 * If the message is completely received call ishtp_cl_bus_rx_event()
 * to process message
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
	unsigned long flags;
	int schedule_work_flag = 0;
	struct ishtp_cl *cl = rb->cl;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	/*
	 * if in-process list is empty, then need to schedule
	 * the processing thread
	 */
	schedule_work_flag = list_empty(&cl->in_process_list.list);
	list_add_tail(&rb->list, &cl->in_process_list.list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

	if (schedule_work_flag)
		ishtp_cl_bus_rx_event(cl->device);
}

/**
 * ipc_tx_callback() - IPC tx callback function
 * @prm: Pointer to client device instance
 *
 * Send a message over IPC, either for the first time or from the callback
 * that fires on completion of the previous fragment
 */
static void ipc_tx_callback(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Bail out if some critical error occurred before
	 * this callback was called
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);
	rem = cl_msg->send_buf.size - cl->tx_offs;

	ishtp_hdr.host_addr = cl->host_client_id;
	ishtp_hdr.fw_addr = cl->fw_client_id;
	ishtp_hdr.reserved = 0;
	pmsg = cl_msg->send_buf.data + cl->tx_offs;

	if (rem <= dev->mtu) {
		ishtp_hdr.length = rem;
		ishtp_hdr.msg_complete = 1;
		cl->sending = 0;
		list_del_init(&cl_msg->list);	/* Must be before write */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		/* Submit to IPC queue with no callback */
		ishtp_write_message(dev, &ishtp_hdr, pmsg);
		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
				       tx_free_flags);
	} else {
		/* Send IPC fragment */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		cl->tx_offs += dev->mtu;
		ishtp_hdr.length = dev->mtu;
		ishtp_hdr.msg_complete = 0;
		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
	}
}
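
/*
 * Worked example (illustrative, not part of this file): with dev->mtu of
 * 128 bytes, a 300-byte message leaves this callback three times:
 *
 *	call 1: tx_offs = 0,   rem = 300 > 128 -> fragment, length = 128
 *	call 2: tx_offs = 128, rem = 172 > 128 -> fragment, length = 128
 *	call 3: tx_offs = 256, rem = 44 <= 128 -> length = 44,
 *		msg_complete = 1, buffer returned to the tx free list
 *
 * Calls 2 and 3 are re-entries via the ipc_tx_callback completion hook
 * passed to ishtp_send_msg(). The MTU value is an assumption for the
 * arithmetic only.
 */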

/**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message over IPC, not using DMA
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
		return;

	cl->tx_offs = 0;
	ipc_tx_callback(cl);
	++cl->send_msg_cnt_ipc;
}

/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);

	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);	/* Must be before write */
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* write msg to dma buf */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/* send dma_xfer hbm msg */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	++cl->tx_ring_free_size;
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}

/**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA or IPC based on transfer_path
 */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	if (dev->transfer_path == CL_TX_PATH_DMA)
		ishtp_cl_send_msg_dma(dev, cl);
	else
		ishtp_cl_send_msg_ipc(dev, cl);
}

/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in ISR
 * or work queue context
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;
	int rb_count;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, flags);
	rb_count = -1;
	list_for_each_entry(rb, &dev->read_list.list, list) {
		++rb_count;
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message would overflow the buffer (exceeds max.
		 * client msg size), drop the message and return the buffer
		 * to the free list.
		 * Do we need to disconnect such a client? (We don't send
		 * back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment in message - it's complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * the whole msg arrived, send a new FC, and add a new
			 * rb buffer for the next coming msg
			 */
			spin_lock(&cl->free_list_spinlock);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
						    struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock(&cl->free_list_spinlock);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					      &dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock(&cl->free_list_spinlock);
			}
		}
		/* One more fragment in message (even if this was last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}
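
/*
 * Worked example (illustrative, not part of this file): reassembly of a
 * message arriving as three IPC fragments into one rb. Sizes are assumed
 * for the arithmetic only.
 *
 *	fragment 1: len 128, msg_complete = 0 -> buf_idx: 0   -> 128
 *	fragment 2: len 128, msg_complete = 0 -> buf_idx: 128 -> 256
 *	fragment 3: len 44,  msg_complete = 1 -> buf_idx: 256 -> 300,
 *		rb moved off the read list, a fresh rb is armed with a new
 *		flow-control credit, then ishtp_cl_read_complete() queues
 *		the rb on the client's in-process list
 */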

/**
 * recv_ishtp_cl_msg_dma() - Receive client message
 * @dev: ISHTP device instance
 * @msg: message pointer
 * @hbm: hbm buffer
 *
 * Receive and dispatch ISHTP client messages using DMA. This function executes
 * in ISR or work queue context
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->read_list_spinlock, flags);

	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/*
		 * If no Rx buffer is allocated, disband the rb
		 */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message would overflow the buffer (exceeds max.
		 * client msg size), drop the message and return the buffer
		 * to the free list.
		 * Do we need to disconnect such a client? (We don't send
		 * back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;
		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* Last fragment in message - it's complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		--cl->out_flow_ctrl_creds;
		/*
		 * the whole msg arrived, send a new FC, and add a new
		 * rb buffer for the next coming msg
		 */
		spin_lock(&cl->free_list_spinlock);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
					    struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			spin_unlock(&cl->free_list_spinlock);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				      &dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			spin_unlock(&cl->free_list_spinlock);
		}

		/* One more fragment in message (this is always last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	if (complete_rb) {
		cl = complete_rb->cl;
		cl->ts_rx = ktime_get();
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}

void *ishtp_get_client_data(struct ishtp_cl *cl)
{
	return cl->client_data;
}
EXPORT_SYMBOL(ishtp_get_client_data);

void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
{
	cl->client_data = data;
}
EXPORT_SYMBOL(ishtp_set_client_data);

struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
{
	return cl->dev;
}
EXPORT_SYMBOL(ishtp_get_ishtp_device);

void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->tx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_tx_ring_size);

void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
{
	cl->rx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_rx_ring_size);

void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
{
	cl->state = state;
}
EXPORT_SYMBOL(ishtp_set_connection_state);

void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
	cl->fw_client_id = fw_client_id;
}
EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
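
/*
 * Example (illustrative sketch, not part of this file): the accessors
 * above let a client driver size its rings and stash private state before
 * connecting; ring sizes take effect when ishtp_cl_connect() allocates
 * the rings. The structure and variable names are hypothetical.
 *
 *	static struct my_drv_data drv_data;	// hypothetical private state
 *
 *	ishtp_set_tx_ring_size(cl, 16);		// before ishtp_cl_connect()
 *	ishtp_set_rx_ring_size(cl, 32);
 *	ishtp_set_client_data(cl, &drv_data);
 *	...
 *	struct my_drv_data *p = ishtp_get_client_data(cl);
 */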