/*
 * cmt_speech.c - HSI CMT speech driver
 *
 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
 * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/pm_qos.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>
#include <linux/hsi/cs-protocol.h>

#define CS_MMAP_SIZE	PAGE_SIZE

struct char_queue {
	struct list_head	list;
	u32			msg;
};

struct cs_char {
	unsigned int		opened;
	struct hsi_client	*cl;
	struct cs_hsi_iface	*hi;
	struct list_head	chardev_queue;
	struct list_head	dataind_queue;
	int			dataind_pending;
	/* mmap things */
	unsigned long		mmap_base;
	unsigned long		mmap_size;
	spinlock_t		lock;
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	/* hsi channel ids */
	int			channel_id_cmd;
	int			channel_id_data;
};

#define SSI_CHANNEL_STATE_READING	1
#define SSI_CHANNEL_STATE_WRITING	(1 << 1)
#define SSI_CHANNEL_STATE_POLL		(1 << 2)
#define SSI_CHANNEL_STATE_ERROR		(1 << 3)

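/*
 * Commands are 32-bit words. The domain bits (starting at
 * CS_DOMAIN_SHIFT, see cs-protocol.h) select the target: commands
 * addressed to the remote CMT go out on the HSI control channel,
 * while local commands are handled inside this driver (see
 * cs_hsi_command()).
 */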
#define TARGET_MASK		0xf000000
#define TARGET_REMOTE		(1 << CS_DOMAIN_SHIFT)
#define TARGET_LOCAL		0

/* Number of pre-allocated command buffers */
#define CS_MAX_CMDS		4

/*
 * During data transfers, transactions must be handled
 * within 20ms (fixed value in cmtspeech HSI protocol)
 */
#define CS_QOS_LATENCY_FOR_DATA_USEC	20000

/* Timeout to wait for pending HSI transfers to complete */
#define CS_HSI_TRANSFER_TIMEOUT_MS	500

#define RX_PTR_BOUNDARY_SHIFT	8
#define RX_PTR_MAX_SHIFT	(RX_PTR_BOUNDARY_SHIFT + \
					CS_MAX_BUFFERS_SHIFT)
struct cs_hsi_iface {
	struct hsi_client		*cl;
	struct hsi_client		*master;

	unsigned int			iface_state;
	unsigned int			wakeline_state;
	unsigned int			control_state;
	unsigned int			data_state;

	/* state exposed to application */
	struct cs_mmap_config_block	*mmap_cfg;

	unsigned long			mmap_base;
	unsigned long			mmap_size;

	unsigned int			rx_slot;
	unsigned int			tx_slot;

	/* note: for security reasons, we do not trust the contents of
	 * mmap_cfg, but instead duplicate the variables here */
	unsigned int			buf_size;
	unsigned int			rx_bufs;
	unsigned int			tx_bufs;
	unsigned int			rx_ptr_boundary;
	unsigned int			rx_offsets[CS_MAX_BUFFERS];
	unsigned int			tx_offsets[CS_MAX_BUFFERS];

	/* size of aligned memory blocks */
	unsigned int			slot_size;
	unsigned int			flags;

	struct list_head		cmdqueue;

	struct hsi_msg			*data_rx_msg;
	struct hsi_msg			*data_tx_msg;
	wait_queue_head_t		datawait;

	struct pm_qos_request		pm_qos_req;

	spinlock_t			lock;
};

static struct cs_char cs_char_data;

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);

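/*
 * Compile-time check: with CS_FEAT_ROLLING_RX_COUNTER the rx pointer
 * can run up to rx_bufs << RX_PTR_BOUNDARY_SHIFT, so the largest
 * possible value (1 << RX_PTR_MAX_SHIFT) must still fit into an
 * unsigned int.
 */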
static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}

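/*
 * Queue a 32-bit event word for the character device and wake up any
 * readers. Events are silently dropped if the device is not open;
 * otherwise poll/read waiters are woken and SIGIO is sent to fasync
 * subscribers.
 */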
static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;

	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}

static u32 cs_pop_entry(struct list_head *head)
{
	struct char_queue *entry;
	u32 data;

	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);
	kfree(entry);

	return data;
}

static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}

static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
			!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device, "data notification "
			"queue overrun (%u entries)\n",
			cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}

static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 cs_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	return *data;
}

static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}

static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}

static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}

static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

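/*
 * Pre-allocate a pool of CS_MAX_CMDS single-word command messages so
 * that cs_claim_cmd() can hand out a message without allocating in
 * atomic context.
 */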
static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}

static void cs_hsi_data_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	const char *dir = (msg->ttype == HSI_MSG_READ) ? "RX" : "TX";

	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&cs_char_data.cl->device,
			"Data %s flush while device active\n", dir);
	if (msg->ttype == HSI_MSG_READ)
		hi->data_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;

	msg->status = HSI_STATUS_COMPLETED;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);

	spin_unlock(&hi->lock);
}

static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}

static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
		msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}

static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}

static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}

static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}

static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}

static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_read_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
{
	u32 cmd = cs_get_cmd(msg);
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		spin_unlock(&hi->lock);
		cs_hsi_control_read_error(hi, msg);
		goto out;
	}
	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	cs_release_cmd(msg);
	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
		struct timespec tspec;
		struct cs_timestamp *tstamp =
			&hi->mmap_cfg->tstamp_rx_ctrl;

		ktime_get_ts(&tspec);

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
	}
	spin_unlock(&hi->lock);

	cs_notify_control(cmd);

out:
	cs_hsi_read_on_control(hi);
}

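/*
 * Reads are done in two phases: first a "peek" message with
 * sgt.nents == 0 is queued, which the controller completes once
 * incoming data is available; the completion callback then resubmits
 * the same message with sgt.nents == 1 to transfer the actual
 * command word. The data channel below uses the same pattern.
 */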
static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}

static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Make sure control read is always pending when issuing
	 * new control writes. This is needed as the controller
	 * may flush our messages if e.g. the peer device reboots
	 * unexpectedly (and we cannot directly resubmit a new read from
	 * the message destructor; see cs_cmd_destructor()).
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}

static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose current rx ptr in mmap area */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}

static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}

/*
 * Read/write transaction is ongoing. Returns false if in
 * SSI_CHANNEL_STATE_POLL state.
 */
static inline int cs_state_xfer_active(unsigned int state)
{
	return (state & SSI_CHANNEL_STATE_WRITING) ||
		(state & SSI_CHANNEL_STATE_READING);
}

/*
 * No pending read/writes
 */
static inline int cs_state_idle(unsigned int state)
{
	return !(state & ~SSI_CHANNEL_STATE_ERROR);
}

static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *rxmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state &
		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);
	if (ret)
		cs_hsi_data_read_error(hi, rxmsg);
}

static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);
	} else {
		cs_hsi_data_write_error(hi, msg);
	}
}

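/*
 * Transmit one uplink speech frame directly from the mmap'ed buffer
 * area: 'slot' selects the TX buffer via tx_offsets[], so the
 * transfer is zero-copy from the memory shared with userspace.
 */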
static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
{
	u32 *address;
	struct hsi_msg *txmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
		goto error;
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);
	if (ret)
		cs_hsi_data_write_error(hi, txmsg);

	return ret;

error:
	spin_unlock(&hi->lock);
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);

	return ret;
}

static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
{
	return hi->iface_state;
}

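/*
 * Dispatch a command word written by userspace: remote-domain
 * commands are forwarded to the CMT on the control channel, while
 * the local CS_TX_DATA_READY command starts a data-channel write
 * from the slot given in the parameter bits.
 */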
static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}

static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
{
	int change = 0;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = 1;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}

static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For more robust overrun detection, let the rx
		 * pointer run in range 0..'boundary-1'. Boundary
		 * is a multiple of rx_bufs, and limited in max size
		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
		 * calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}

static int check_buf_params(struct cs_hsi_iface *hi,
					const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
			buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device, "No space for the requested buffer "
			"configuration\n");
		r = -ENOBUFS;
	}

	return r;
}

/*
 * Block until pending data transfers have completed.
 */
static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
{
	int r = 0;

	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
		goto out;
	}

	for (;;) {
		int s;
		DEFINE_WAIT(wait);

		if (!cs_state_xfer_active(hi->data_state))
			goto out;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			goto out;
		}
		/*
		 * prepare_to_wait must be called with hi->lock held
		 * so that callbacks can check for waitqueue_active()
		 */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
		if (!s) {
			dev_dbg(&hi->cl->device,
				"hsi_data_sync timeout after %d ms\n",
				CS_HSI_TRANSFER_TIMEOUT_MS);
			r = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);

	return r;
}

static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	unsigned int data_start, i;

	BUG_ON(hi->buf_size == 0);

	set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);

	hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
	dev_dbg(&hi->cl->device,
		"setting slot size to %u, buf size %u, align %u\n",
		hi->slot_size, hi->buf_size, L1_CACHE_BYTES);

	data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	dev_dbg(&hi->cl->device,
		"setting data start at %u, cfg block %zu, align %u\n",
		data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);

	for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
		hi->rx_offsets[i] = data_start + i * hi->slot_size;
		hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
			i, hi->rx_offsets[i]);
	}
	for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
		hi->tx_offsets[i] = data_start +
			(i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
		hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
			i, hi->tx_offsets[i]);
	}

	hi->iface_state = CS_STATE_CONFIGURED;
}

static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
{
	if (old_state == CS_STATE_CONFIGURED) {
		dev_dbg(&hi->cl->device,
			"closing data channel with slot size 0\n");
		hi->iface_state = CS_STATE_OPENED;
	}
}

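/*
 * Apply a new buffer layout: the interface is first dropped back to
 * CS_STATE_OPENED so no new transfers start, pending transfers are
 * drained with cs_hsi_data_sync(), and only then are the buffer
 * parameters rewritten. While the data path is configured, a PM QoS
 * request caps CPU wakeup latency to meet the 20ms protocol deadline.
 */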
static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	int r = 0;
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	/* Prevent new transactions during buffer reconfig */
	if (old_state == CS_STATE_CONFIGURED)
		hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * make sure that no non-zero data reads are ongoing before
	 * proceeding to change the buffer layout
	 */
	r = cs_hsi_data_sync(hi);
	if (r < 0)
		return r;

	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);
	r = check_buf_params(hi, buf_cfg);
	if (r < 0)
		goto error;

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;

	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)
		cs_hsi_data_enable(hi, buf_cfg);
	else
		cs_hsi_data_disable(hi, old_state);

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			pm_qos_add_request(&hi->pm_qos_req,
				PM_QOS_CPU_DMA_LATENCY,
				CS_QOS_LATENCY_FOR_DATA_USEC);
			local_bh_disable();
			cs_hsi_read_on_data(hi);
			local_bh_enable();
		} else if (old_state == CS_STATE_CONFIGURED) {
			pm_qos_remove_request(&hi->pm_qos_req);
		}
	}
	return r;

error:
	spin_unlock_bh(&hi->lock);
	return r;
}

static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
			unsigned long mmap_base, unsigned long mmap_size)
{
	int err = 0;
	struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);

	dev_dbg(&cl->device, "cs_hsi_start\n");

	if (!hsi_if) {
		err = -ENOMEM;
		goto leave0;
	}
	spin_lock_init(&hsi_if->lock);
	hsi_if->cl = cl;
	hsi_if->iface_state = CS_STATE_CLOSED;
	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
	hsi_if->mmap_base = mmap_base;
	hsi_if->mmap_size = mmap_size;
	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
	init_waitqueue_head(&hsi_if->datawait);
	err = cs_alloc_cmds(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages\n");
		goto leave1;
	}
	err = cs_hsi_alloc_data(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
		goto leave2;
	}
	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device,
			"Could not open, HSI port already claimed\n");
		goto leave3;
	}
	hsi_if->master = ssip_slave_get_master(cl);
	if (IS_ERR(hsi_if->master)) {
		err = PTR_ERR(hsi_if->master);
		dev_err(&cl->device, "Could not get HSI master client\n");
		goto leave4;
	}
	if (!ssip_slave_running(hsi_if->master)) {
		err = -ENODEV;
		dev_err(&cl->device,
			"HSI port not initialized\n");
		goto leave4;
	}

	hsi_if->iface_state = CS_STATE_OPENED;
	local_bh_disable();
	cs_hsi_read_on_control(hsi_if);
	local_bh_enable();

	dev_dbg(&cl->device, "cs_hsi_start...done\n");

	BUG_ON(!hi);
	*hi = hsi_if;

	return 0;

leave4:
	hsi_release_port(cl);
leave3:
	cs_hsi_free_data(hsi_if);
leave2:
	cs_free_cmds(hsi_if);
leave1:
	kfree(hsi_if);
leave0:
	dev_dbg(&cl->device, "cs_hsi_start...done/error\n");

	return err;
}

static void cs_hsi_stop(struct cs_hsi_iface *hi)
{
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
	cs_hsi_set_wakeline(hi, 0);
	ssip_slave_put_master(hi->master);

	/* hsi_release_port() needs to be called with CS_STATE_CLOSED */
	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * hsi_release_port() should flush out all the pending
	 * messages, so cs_state_idle() should be true for both
	 * control and data channels.
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (pm_qos_request_active(&hi->pm_qos_req))
		pm_qos_remove_request(&hi->pm_qos_req);

	spin_lock_bh(&hi->lock);
	cs_hsi_free_data(hi);
	cs_free_cmds(hi);
	spin_unlock_bh(&hi->lock);
	kfree(hi);
}

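/*
 * The whole mmap area is backed by the single zeroed page allocated
 * in cs_char_open(), so the fault handler simply maps that page in
 * for any access within the VMA.
 */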
static int cs_char_vma_fault(struct vm_fault *vmf)
{
	struct cs_char *csdata = vmf->vma->vm_private_data;
	struct page *page;

	page = virt_to_page(csdata->mmap_base);
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct cs_char_vm_ops = {
	.fault	= cs_char_vma_fault,
};

static int cs_char_fasync(int fd, struct file *file, int on)
{
	struct cs_char *csdata = file->private_data;

	if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
		return -EIO;

	return 0;
}

static unsigned int cs_char_poll(struct file *file, poll_table *wait)
{
	struct cs_char *csdata = file->private_data;
	unsigned int ret = 0;

	poll_wait(file, &cs_char_data.wait, wait);
	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue))
		ret = POLLIN | POLLRDNORM;
	else if (!list_empty(&csdata->dataind_queue))
		ret = POLLIN | POLLRDNORM;
	spin_unlock_bh(&csdata->lock);

	return ret;
}

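/*
 * Each read() returns a single 32-bit event word; control events
 * (chardev_queue) take priority over data indications
 * (dataind_queue). With O_NONBLOCK the call fails with -EAGAIN if no
 * event is queued, otherwise it sleeps interruptibly.
 */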
static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
			loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	for (;;) {
		DEFINE_WAIT(wait);

		spin_lock_bh(&csdata->lock);
		if (!list_empty(&csdata->chardev_queue)) {
			data = cs_pop_entry(&csdata->chardev_queue);
		} else if (!list_empty(&csdata->dataind_queue)) {
			data = cs_pop_entry(&csdata->dataind_queue);
			csdata->dataind_pending--;
		} else {
			data = 0;
		}
		spin_unlock_bh(&csdata->lock);

		if (data)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		prepare_to_wait_exclusive(&csdata->wait, &wait,
						TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&csdata->wait, &wait);
	}

	retval = put_user(data, (u32 __user *)buf);
	if (!retval)
		retval = sizeof(data);

out:
	return retval;
}

static ssize_t cs_char_write(struct file *file, const char __user *buf,
			size_t count, loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	int err;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	if (get_user(data, (u32 __user *)buf))
		return -EFAULT;

	retval = count;
	err = cs_hsi_command(csdata->hi, data);
	if (err < 0)
		retval = err;

	return retval;
}

static long cs_char_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct cs_char *csdata = file->private_data;
	int r = 0;

	switch (cmd) {
	case CS_GET_STATE: {
		unsigned int state;

		state = cs_hsi_get_state(csdata->hi);
		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
			r = -EFAULT;

		break;
	}
	case CS_SET_WAKELINE: {
		unsigned int state;

		if (copy_from_user(&state, (void __user *)arg,
							sizeof(state))) {
			r = -EFAULT;
			break;
		}

		if (state > 1) {
			r = -EINVAL;
			break;
		}

		cs_hsi_set_wakeline(csdata->hi, !!state);

		break;
	}
	case CS_GET_IF_VERSION: {
		unsigned int ifver = CS_IF_VERSION;

		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
			r = -EFAULT;

		break;
	}
	case CS_CONFIG_BUFS: {
		struct cs_buffer_config buf_cfg;

		if (copy_from_user(&buf_cfg, (void __user *)arg,
							sizeof(buf_cfg)))
			r = -EFAULT;
		else
			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}

static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	if (vma_pages(vma) != 1)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
	vma->vm_ops = &cs_char_vm_ops;
	vma->vm_private_data = file->private_data;

	return 0;
}

static int cs_char_open(struct inode *unused, struct file *file)
{
	int ret = 0;
	unsigned long p;

	spin_lock_bh(&cs_char_data.lock);
	if (cs_char_data.opened) {
		ret = -EBUSY;
		spin_unlock_bh(&cs_char_data.lock);
		goto out1;
	}
	cs_char_data.opened = 1;
	cs_char_data.dataind_pending = 0;
	spin_unlock_bh(&cs_char_data.lock);

	p = get_zeroed_page(GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out2;
	}

	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
	if (ret) {
		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
		goto out3;
	}

	/* these are only used in release so lock not needed */
	cs_char_data.mmap_base = p;
	cs_char_data.mmap_size = CS_MMAP_SIZE;

	file->private_data = &cs_char_data;

	return 0;

out3:
	free_page(p);
out2:
	spin_lock_bh(&cs_char_data.lock);
	cs_char_data.opened = 0;
	spin_unlock_bh(&cs_char_data.lock);
out1:
	return ret;
}

static void cs_free_char_queue(struct list_head *head)
{
	struct char_queue *entry;
	struct list_head *cursor, *next;

	if (!list_empty(head)) {
		list_for_each_safe(cursor, next, head) {
			entry = list_entry(cursor, struct char_queue, list);
			list_del(&entry->list);
			kfree(entry);
		}
	}
}

static int cs_char_release(struct inode *unused, struct file *file)
{
	struct cs_char *csdata = file->private_data;

	cs_hsi_stop(csdata->hi);
	spin_lock_bh(&csdata->lock);
	csdata->hi = NULL;
	free_page(csdata->mmap_base);
	cs_free_char_queue(&csdata->chardev_queue);
	cs_free_char_queue(&csdata->dataind_queue);
	csdata->opened = 0;
	spin_unlock_bh(&csdata->lock);

	return 0;
}

static const struct file_operations cs_char_fops = {
	.owner		= THIS_MODULE,
	.read		= cs_char_read,
	.write		= cs_char_write,
	.poll		= cs_char_poll,
	.unlocked_ioctl	= cs_char_ioctl,
	.mmap		= cs_char_mmap,
	.open		= cs_char_open,
	.release	= cs_char_release,
	.fasync		= cs_char_fasync,
};

static struct miscdevice cs_char_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "cmt_speech",
	.fops	= &cs_char_fops
};

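/*
 * The HSI channel ids for the control and data channels are resolved
 * by name ("speech-control", "speech-data") from the hsi_client's
 * channel descriptions, typically provided via the device tree.
 */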
static int cs_hsi_client_probe(struct device *dev)
{
	int err = 0;
	struct hsi_client *cl = to_hsi_client(dev);

	dev_dbg(dev, "hsi_client_probe\n");
	init_waitqueue_head(&cs_char_data.wait);
	spin_lock_init(&cs_char_data.lock);
	cs_char_data.opened = 0;
	cs_char_data.cl = cl;
	cs_char_data.hi = NULL;
	INIT_LIST_HEAD(&cs_char_data.chardev_queue);
	INIT_LIST_HEAD(&cs_char_data.dataind_queue);

	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
		"speech-control");
	if (cs_char_data.channel_id_cmd < 0) {
		err = cs_char_data.channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		return err;
	}

	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
		"speech-data");
	if (cs_char_data.channel_id_data < 0) {
		err = cs_char_data.channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		return err;
	}

	err = misc_register(&cs_char_miscdev);
	if (err)
		dev_err(dev, "Failed to register: %d\n", err);

	return err;
}

static int cs_hsi_client_remove(struct device *dev)
{
	struct cs_hsi_iface *hi;

	dev_dbg(dev, "hsi_client_remove\n");
	misc_deregister(&cs_char_miscdev);
	spin_lock_bh(&cs_char_data.lock);
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);
	if (hi)
		cs_hsi_stop(hi);

	return 0;
}

static struct hsi_client_driver cs_hsi_driver = {
	.driver = {
		.name	= "cmt-speech",
		.owner	= THIS_MODULE,
		.probe	= cs_hsi_client_probe,
		.remove	= cs_hsi_client_remove,
	},
};

static int __init cs_char_init(void)
{
	pr_info("CMT speech driver added\n");
	return hsi_register_client_driver(&cs_hsi_driver);
}
module_init(cs_char_init);

static void __exit cs_char_exit(void)
{
	hsi_unregister_client_driver(&cs_hsi_driver);
	pr_info("CMT speech driver removed\n");
}
module_exit(cs_char_exit);

MODULE_ALIAS("hsi:cmt-speech");
MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
MODULE_DESCRIPTION("CMT speech driver");
MODULE_LICENSE("GPL v2");