1 /*
2  * intel_sst_stream_encoded.c - Intel SST Driver for audio engine
3 *
4 * Copyright (C) 2008-10 Intel Corp
5 * Authors: Vinod Koul <vinod.koul@intel.com>
6 * Harsha Priya <priya.harsha@intel.com>
7 * Dharageswari R <dharageswari.r@intel.com>
8 * KP Jeeja <jeeja.kp@intel.com>
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2 of the License.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
23 *
24 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 *
26  * This file contains the stream operations of the SST driver
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/pci.h>
32 #include <linux/syscalls.h>
33 #include <linux/firmware.h>
34 #include <linux/sched.h>
35 #ifdef CONFIG_MRST_RAR_HANDLER
36 #include <linux/rar_register.h>
37 #include "../memrar/memrar.h"
38 #endif
39 #include "intel_sst_ioctl.h"
40 #include "intel_sst.h"
41 #include "intel_sst_fw_ipc.h"
42 #include "intel_sst_common.h"
43 /**
44 * sst_get_stream_params - Send msg to query for stream parameters
45  * @str_id: stream id for which the parameters are queried
46  * @get_params: out structure to which the queried parameters are copied
47  *
48  * This function is called when the stream parameters are queried
49 */
50 int sst_get_stream_params(int str_id,
51 struct snd_sst_get_stream_params *get_params)
52 {
53 int retval = 0;
54 struct ipc_post *msg = NULL;
55 struct stream_info *str_info;
56 struct snd_sst_fw_get_stream_params *fw_params;
57
58 pr_debug("get_stream for %d\n", str_id);
59 retval = sst_validate_strid(str_id);
60 if (retval)
61 return retval;
62
63 str_info = &sst_drv_ctx->streams[str_id];
64 if (str_info->status != STREAM_UN_INIT) {
65 if (str_info->ctrl_blk.on == true) {
66 pr_err("control path in use\n");
67 return -EINVAL;
68 }
69 if (sst_create_short_msg(&msg)) {
70 pr_err("message creation failed\n");
71 return -ENOMEM;
72 }
73 fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
74 if (!fw_params) {
75 pr_err("mem allocation failed\n");
76 kfree(msg);
77 return -ENOMEM;
78 }
79
80 sst_fill_header(&msg->header, IPC_IA_GET_STREAM_PARAMS,
81 0, str_id);
82 str_info->ctrl_blk.condition = false;
83 str_info->ctrl_blk.ret_code = 0;
84 str_info->ctrl_blk.on = true;
85 str_info->ctrl_blk.data = (void *) fw_params;
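		/*
		 * Queue the message on the IPC dispatch list, kick the
		 * post-message work, and block (with timeout) until the reply
		 * handler has filled fw_params, which is attached via
		 * ctrl_blk.data.
		 */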
86 spin_lock(&sst_drv_ctx->list_spin_lock);
87 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
88 spin_unlock(&sst_drv_ctx->list_spin_lock);
89 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
90 retval = sst_wait_interruptible_timeout(sst_drv_ctx,
91 &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
92 if (retval) {
93 get_params->codec_params.result = retval;
94 kfree(fw_params);
95 return -EIO;
96 }
97 memcpy(&get_params->pcm_params, &fw_params->pcm_params,
98 sizeof(fw_params->pcm_params));
99 memcpy(&get_params->codec_params.sparams,
100 &fw_params->codec_params,
101 sizeof(fw_params->codec_params));
102 get_params->codec_params.result = 0;
103 get_params->codec_params.stream_id = str_id;
104 get_params->codec_params.codec = str_info->codec;
105 get_params->codec_params.ops = str_info->ops;
106 get_params->codec_params.stream_type = str_info->str_type;
107 kfree(fw_params);
108 } else {
109 		pr_debug("stream is not initialized\n");
110 }
111 return retval;
112 }
113
114 /**
115 * sst_set_stream_param - Send msg for setting stream parameters
116 *
117 * @str_id: stream id
118 * @str_param: stream params
119 *
120 * This function sets stream params during runtime
121 */
122 int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
123 {
124 int retval = 0;
125 struct ipc_post *msg = NULL;
126 struct stream_info *str_info;
127
128 BUG_ON(!str_param);
129 	retval = sst_validate_strid(str_id);
130 	if (retval)
131 		return retval;
132 	if (sst_drv_ctx->streams[str_id].ops != str_param->ops) {
133 		pr_err("Invalid operation\n");
134 		return -EINVAL;
135 	}
136 pr_debug("set_stream for %d\n", str_id);
137 str_info = &sst_drv_ctx->streams[str_id];
138 if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
139 if (str_info->ctrl_blk.on == true) {
140 pr_err("control path in use\n");
141 return -EAGAIN;
142 }
143 if (sst_create_large_msg(&msg))
144 return -ENOMEM;
145
146 sst_fill_header(&msg->header,
147 IPC_IA_SET_STREAM_PARAMS, 1, str_id);
148 str_info->ctrl_blk.condition = false;
149 str_info->ctrl_blk.ret_code = 0;
150 str_info->ctrl_blk.on = true;
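		/*
		 * Large-message layout: the 32-bit IPC header dword is placed
		 * first in the mailbox, immediately followed by the stream
		 * params payload (str_param->sparams).
		 */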
151 msg->header.part.data = sizeof(u32) +
152 sizeof(str_param->sparams);
153 memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
154 memcpy(msg->mailbox_data + sizeof(u32), &str_param->sparams,
155 sizeof(str_param->sparams));
156 spin_lock(&sst_drv_ctx->list_spin_lock);
157 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
158 spin_unlock(&sst_drv_ctx->list_spin_lock);
159 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
160 retval = sst_wait_interruptible_timeout(sst_drv_ctx,
161 &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
162 if (retval < 0) {
163 retval = -EIO;
164 sst_clean_stream(str_info);
165 }
166 } else {
167 retval = -EBADRQC;
168 		pr_err("BADRQC for stream\n");
169 }
170 return retval;
171 }
172
173 /**
174  * sst_get_vol - Get the premix gain or the gain of a stream
175  *
176  * @get_vol: output param through which the volume
177  * structure is passed back to the user
178 *
179  * This function is called when the premix gain or stream gain is queried
180 */
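/*
 * Minimal usage sketch (hypothetical caller, not part of this file): only
 * the stream id needs to be filled in, the rest is read back on success.
 *
 *	struct snd_sst_vol vol = { .stream_id = str_id };
 *	if (!sst_get_vol(&vol))
 *		pr_debug("vol %d ramp %d\n", vol.volume, vol.ramp_duration);
 */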
181 int sst_get_vol(struct snd_sst_vol *get_vol)
182 {
183 int retval = 0;
184 struct ipc_post *msg = NULL;
185 struct snd_sst_vol *fw_get_vol;
186 int str_id = get_vol->stream_id;
187
188 pr_debug("get vol called\n");
189
190 if (sst_create_short_msg(&msg))
191 return -ENOMEM;
192
193 sst_fill_header(&msg->header,
194 IPC_IA_GET_STREAM_VOL, 0, str_id);
195 sst_drv_ctx->vol_info_blk.condition = false;
196 sst_drv_ctx->vol_info_blk.ret_code = 0;
197 sst_drv_ctx->vol_info_blk.on = true;
198 fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
199 if (!fw_get_vol) {
200 pr_err("mem allocation failed\n");
201 kfree(msg);
202 return -ENOMEM;
203 }
204 sst_drv_ctx->vol_info_blk.data = (void *)fw_get_vol;
205 spin_lock(&sst_drv_ctx->list_spin_lock);
206 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
207 spin_unlock(&sst_drv_ctx->list_spin_lock);
208 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
209 retval = sst_wait_interruptible_timeout(sst_drv_ctx,
210 &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
211 if (retval)
212 retval = -EIO;
213 else {
214 pr_debug("stream id %d\n", fw_get_vol->stream_id);
215 pr_debug("volume %d\n", fw_get_vol->volume);
216 pr_debug("ramp duration %d\n", fw_get_vol->ramp_duration);
217 pr_debug("ramp_type %d\n", fw_get_vol->ramp_type);
218 memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
219 	}
	kfree(fw_get_vol);
220 	return retval;
221 }
222
223 /**
224  * sst_set_vol - Set the premix gain or the gain of a stream
225 *
226 * @set_vol: this holds the volume structure that needs to be set
227 *
228 * This function is called when premix gain or stream gain is requested to be set
229 */
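/*
 * Note: unlike sst_get_vol() above, the caller's snd_sst_vol is sent down
 * as-is in a large IPC message and is also attached to vol_info_blk.data
 * for the reply handler.
 */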
230 int sst_set_vol(struct snd_sst_vol *set_vol)
231 {
232
233 int retval = 0;
234 struct ipc_post *msg = NULL;
235
236 pr_debug("set vol called\n");
237
238 if (sst_create_large_msg(&msg)) {
239 pr_err("message creation failed\n");
240 return -ENOMEM;
241 }
242 sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
243 set_vol->stream_id);
244
245 msg->header.part.data = sizeof(u32) + sizeof(*set_vol);
246 memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
247 memcpy(msg->mailbox_data + sizeof(u32), set_vol, sizeof(*set_vol));
248 sst_drv_ctx->vol_info_blk.condition = false;
249 sst_drv_ctx->vol_info_blk.ret_code = 0;
250 sst_drv_ctx->vol_info_blk.on = true;
251 sst_drv_ctx->vol_info_blk.data = set_vol;
252 spin_lock(&sst_drv_ctx->list_spin_lock);
253 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
254 spin_unlock(&sst_drv_ctx->list_spin_lock);
255 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
256 retval = sst_wait_interruptible_timeout(sst_drv_ctx,
257 &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
258 if (retval) {
259 pr_err("error in set_vol = %d\n", retval);
260 retval = -EIO;
261 }
262 return retval;
263 }
264
265 /**
266 * sst_set_mute - This function sets premix mute or soft mute of a stream
267 *
268 * @set_mute: this holds the mute structure that needs to be set
269 *
270  * This function is called when premix mute or stream mute is requested to be set
271 */
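/*
 * The flow mirrors sst_set_vol() above, but uses mute_info_blk and the
 * IPC_IA_SET_STREAM_MUTE message.
 */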
272 int sst_set_mute(struct snd_sst_mute *set_mute)
273 {
274
275 int retval = 0;
276 struct ipc_post *msg = NULL;
277
278 pr_debug("set mute called\n");
279
280 if (sst_create_large_msg(&msg)) {
281 pr_err("message creation failed\n");
282 return -ENOMEM;
283 }
284 sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
285 set_mute->stream_id);
286 sst_drv_ctx->mute_info_blk.condition = false;
287 sst_drv_ctx->mute_info_blk.ret_code = 0;
288 sst_drv_ctx->mute_info_blk.on = true;
289 sst_drv_ctx->mute_info_blk.data = set_mute;
290
291 msg->header.part.data = sizeof(u32) + sizeof(*set_mute);
292 memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
293 memcpy(msg->mailbox_data + sizeof(u32), set_mute,
294 sizeof(*set_mute));
295 spin_lock(&sst_drv_ctx->list_spin_lock);
296 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
297 spin_unlock(&sst_drv_ctx->list_spin_lock);
298 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
299 retval = sst_wait_interruptible_timeout(sst_drv_ctx,
300 &sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
301 if (retval) {
302 pr_err("error in set_mute = %d\n", retval);
303 retval = -EIO;
304 }
305 return retval;
306 }
307
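/*
 * Prepare a target slot for reconfiguration: deactivate the PMIC voice port
 * when the music slot (PCM device instance 1) is prepared, or the audio port
 * when the voip slot (PCM device instance 0) is prepared.
 */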
308 int sst_prepare_target(struct snd_sst_slot_info *slot)
309 {
310 if (slot->target_device == SND_SST_TARGET_PMIC
311 && slot->device_instance == 1) {
312 /*music mode*/
313 if (sst_drv_ctx->pmic_port_instance == 0)
314 sst_drv_ctx->scard_ops->set_voice_port(
315 DEACTIVATE);
316 } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
317 slot->target_device == SND_SST_TARGET_MODEM) &&
318 slot->device_instance == 0) {
319 /*voip mode where pcm0 is active*/
320 if (sst_drv_ctx->pmic_port_instance == 1)
321 sst_drv_ctx->scard_ops->set_audio_port(
322 DEACTIVATE);
323 }
324 return 0;
325 }
326
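/*
 * Activate a target slot: for the music slot, bring up the PMIC audio port,
 * program the PCM parameters taken from the slot and power up the playback/
 * capture paths that have active streams; for the voip slot, bring up the
 * voice port instead.
 */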
327 int sst_activate_target(struct snd_sst_slot_info *slot)
328 {
329 if (slot->target_device == SND_SST_TARGET_PMIC &&
330 slot->device_instance == 1) {
331 /*music mode*/
332 sst_drv_ctx->pmic_port_instance = 1;
333 sst_drv_ctx->scard_ops->set_audio_port(ACTIVATE);
334 sst_drv_ctx->scard_ops->set_pcm_audio_params(
335 slot->pcm_params.sfreq,
336 slot->pcm_params.pcm_wd_sz,
337 slot->pcm_params.num_chan);
338 if (sst_drv_ctx->pb_streams)
339 sst_drv_ctx->scard_ops->power_up_pmic_pb(1);
340 if (sst_drv_ctx->cp_streams)
341 sst_drv_ctx->scard_ops->power_up_pmic_cp(1);
342 } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
343 slot->target_device == SND_SST_TARGET_MODEM) &&
344 slot->device_instance == 0) {
345 /*voip mode where pcm0 is active*/
346 sst_drv_ctx->pmic_port_instance = 0;
347 sst_drv_ctx->scard_ops->set_voice_port(
348 ACTIVATE);
349 sst_drv_ctx->scard_ops->power_up_pmic_pb(0);
350 /*sst_drv_ctx->scard_ops->power_up_pmic_cp(0);*/
351 }
352 return 0;
353 }
354
355 int sst_parse_target(struct snd_sst_slot_info *slot)
356 {
357 int retval = 0;
358
359 if (slot->action == SND_SST_PORT_ACTIVATE &&
360 slot->device_type == SND_SST_DEVICE_PCM) {
361 retval = sst_activate_target(slot);
362 if (retval)
363 pr_err("SST_Activate_target_fail\n");
364 else
365 			pr_debug("SST_Activate_target_pass\n");
366 return retval;
367 } else if (slot->action == SND_SST_PORT_PREPARE &&
368 slot->device_type == SND_SST_DEVICE_PCM) {
369 retval = sst_prepare_target(slot);
370 if (retval)
371 pr_err("SST_prepare_target_fail\n");
372 else
373 			pr_debug("SST_prepare_target_pass\n");
374 return retval;
375 } else {
376 pr_err("slot_action : %d, device_type: %d\n",
377 slot->action, slot->device_type);
378 return retval;
379 }
380 }
381
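/*
 * Send the whole snd_sst_target_device block to the firmware in a single
 * large IPC_IA_TARGET_DEV_SELECT message and wait on tgt_dev_blk with the
 * longer target-device timeout.
 */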
382 int sst_send_target(struct snd_sst_target_device *target)
383 {
384 int retval;
385 struct ipc_post *msg;
386
387 if (sst_create_large_msg(&msg)) {
388 pr_err("message creation failed\n");
389 return -ENOMEM;
390 }
391 sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
392 sst_drv_ctx->tgt_dev_blk.condition = false;
393 sst_drv_ctx->tgt_dev_blk.ret_code = 0;
394 sst_drv_ctx->tgt_dev_blk.on = true;
395
396 msg->header.part.data = sizeof(u32) + sizeof(*target);
397 memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
398 memcpy(msg->mailbox_data + sizeof(u32), target,
399 sizeof(*target));
400 spin_lock(&sst_drv_ctx->list_spin_lock);
401 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
402 spin_unlock(&sst_drv_ctx->list_spin_lock);
403 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
404 pr_debug("message sent- waiting\n");
405 retval = sst_wait_interruptible_timeout(sst_drv_ctx,
406 &sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
407 if (retval)
408 pr_err("target device ipc failed = 0x%x\n", retval);
409 return retval;
410
411 }
412
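/*
 * Validate the PCM slots of a target request: device instance 1 must use one
 * of the supported I2S/right-justified/mode1 PCM modes, and device instance 0
 * must additionally carry 8 kHz, mono, 16-bit PCM parameters.
 */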
413 int sst_target_device_validate(struct snd_sst_target_device *target)
414 {
415 int retval = 0;
416 int i;
417
418 for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
419 if (target->devices[i].device_type == SND_SST_DEVICE_PCM) {
420 /*pcm device, check params*/
421 if (target->devices[i].device_instance == 1) {
422 if ((target->devices[i].device_mode !=
423 SND_SST_DEV_MODE_PCM_MODE4_I2S) &&
424 (target->devices[i].device_mode !=
425 SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED)
426 && (target->devices[i].device_mode !=
427 SND_SST_DEV_MODE_PCM_MODE1))
428 goto err;
429 } else if (target->devices[i].device_instance == 0) {
430 if ((target->devices[i].device_mode !=
431 SND_SST_DEV_MODE_PCM_MODE2)
432 && (target->devices[i].device_mode !=
433 SND_SST_DEV_MODE_PCM_MODE4_I2S)
434 && (target->devices[i].device_mode !=
435 SND_SST_DEV_MODE_PCM_MODE1))
436 goto err;
437 if (target->devices[i].pcm_params.sfreq != 8000
438 || target->devices[i].pcm_params.num_chan != 1
439 || target->devices[i].pcm_params.pcm_wd_sz !=
440 16)
441 goto err;
442 } else {
443 err:
444 pr_err("i/p params incorrect\n");
445 return -EINVAL;
446 }
447 }
448 }
449 return retval;
450 }
451
452 /**
453 * sst_target_device_select - This function sets the target device configurations
454 *
455 * @target: this parameter holds the configurations to be set
456 *
457 * This function is called when the user layer wants to change the target
458 * device's configurations
459 */
460
461 int sst_target_device_select(struct snd_sst_target_device *target)
462 {
463 int retval, i, prepare_count = 0;
464
465 pr_debug("Target Device Select\n");
466
467 if (target->device_route < 0 || target->device_route > 2) {
468 pr_err("device route is invalid\n");
469 return -EINVAL;
470 }
471
472 if (target->device_route != 0) {
473 pr_err("Unsupported config\n");
474 return -EIO;
475 }
476 retval = sst_target_device_validate(target);
477 if (retval)
478 return retval;
479
480 retval = sst_send_target(target);
481 if (retval)
482 return retval;
483 for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
484 if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
485 pr_debug("activate called in %d\n", i);
486 retval = sst_parse_target(&target->devices[i]);
487 if (retval)
488 return retval;
489 } else if (target->devices[i].action == SND_SST_PORT_PREPARE) {
490 pr_debug("PREPARE in %d, Forwarding\n", i);
491 retval = sst_parse_target(&target->devices[i]);
492 if (retval) {
493 pr_err("Parse Target fail %d\n", retval);
494 return retval;
495 }
496 pr_debug("Parse Target successful %d\n", retval);
497 if (target->devices[i].device_type ==
498 SND_SST_DEVICE_PCM)
499 prepare_count++;
500 }
501 }
502 if (target->devices[0].action == SND_SST_PORT_PREPARE &&
503 prepare_count == 0)
504 sst_drv_ctx->scard_ops->power_down_pmic();
505
506 return retval;
507 }
508 #ifdef CONFIG_MRST_RAR_HANDLER
509 /*This function gets the physical address of the secure memory from the handle*/
510 static inline int sst_get_RAR(struct RAR_buffer *buffers, int count)
511 {
512 int retval = 0, rar_status = 0;
513
514 rar_status = rar_handle_to_bus(buffers, count);
515
516 if (count != rar_status) {
517 		pr_err("rar_handle_to_bus call failed\n");
518 retval = -EIO;
519 }
520 if (buffers->info.type != RAR_TYPE_AUDIO) {
521 pr_err("Invalid RAR type\n");
522 return -EINVAL;
523 }
524 return retval;
525 }
526
527 #endif
528
529 /* This function creates the scatter gather list to be sent to the firmware
530 for capture/playback data */
531 static int sst_create_sg_list(struct stream_info *stream,
532 struct sst_frame_info *sg_list)
533 {
534 struct sst_stream_bufs *kbufs = NULL;
535 #ifdef CONFIG_MRST_RAR_HANDLER
536 struct RAR_buffer rar_buffers;
537 int retval = 0;
538 #endif
539 int i = 0;
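	/*
	 * Walk the buffers queued on the stream that are not yet in use and
	 * turn each into a scatter-gather entry: a RAR bus address for DRM
	 * playback, a plain physical address otherwise, up to
	 * MAX_NUM_SCATTER_BUFFERS entries per message.
	 */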
540 list_for_each_entry(kbufs, &stream->bufs, node) {
541 if (kbufs->in_use == false) {
542 #ifdef CONFIG_MRST_RAR_HANDLER
543 if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
544 pr_debug("DRM playback handling\n");
545 rar_buffers.info.handle = (__u32)kbufs->addr;
546 rar_buffers.info.size = kbufs->size;
547 pr_debug("rar handle 0x%x size=0x%x\n",
548 rar_buffers.info.handle,
549 rar_buffers.info.size);
550 retval = sst_get_RAR(&rar_buffers, 1);
551
552 if (retval)
553 return retval;
554 sg_list->addr[i].addr = rar_buffers.bus_address;
555 /* rar_buffers.info.size; */
556 sg_list->addr[i].size = (__u32)kbufs->size;
557 pr_debug("phyaddr[%d] 0x%x Size:0x%x\n"
558 , i, sg_list->addr[i].addr,
559 sg_list->addr[i].size);
560 }
561 #endif
562 if (stream->ops != STREAM_OPS_PLAYBACK_DRM) {
563 sg_list->addr[i].addr =
564 virt_to_phys((void *)
565 kbufs->addr + kbufs->offset);
566 sg_list->addr[i].size = kbufs->size;
567 pr_debug("phyaddr[%d]:0x%x Size:0x%x\n"
568 , i , sg_list->addr[i].addr, kbufs->size);
569 }
570 stream->curr_bytes += sg_list->addr[i].size;
571 kbufs->in_use = true;
572 i++;
573 }
574 if (i >= MAX_NUM_SCATTER_BUFFERS)
575 break;
576 }
577
578 sg_list->num_entries = i;
579 pr_debug("sg list entries = %d\n", sg_list->num_entries);
580 return i;
581 }
582
583
584 /**
585 * sst_play_frame - Send msg for sending stream frames
586 *
587 * @str_id: ID of stream
588 *
589 * This function is called to send data to be played out
590 * to the firmware
591 */
592 int sst_play_frame(int str_id)
593 {
594 int i = 0, retval = 0;
595 struct ipc_post *msg = NULL;
596 struct sst_frame_info sg_list = {0};
597 struct sst_stream_bufs *kbufs = NULL, *_kbufs;
598 struct stream_info *stream;
599
600 pr_debug("play frame for %d\n", str_id);
601 retval = sst_validate_strid(str_id);
602 if (retval)
603 return retval;
604
605 stream = &sst_drv_ctx->streams[str_id];
606 /* clear prev sent buffers */
607 list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
608 if (kbufs->in_use == true) {
609 spin_lock(&stream->pcm_lock);
610 list_del(&kbufs->node);
611 spin_unlock(&stream->pcm_lock);
612 kfree(kbufs);
613 }
614 }
615 /* update bytes sent */
616 stream->cumm_bytes += stream->curr_bytes;
617 stream->curr_bytes = 0;
618 if (list_empty(&stream->bufs)) {
619 /* no user buffer available */
620 pr_debug("Null buffer stream status %d\n", stream->status);
621 stream->prev = stream->status;
622 stream->status = STREAM_INIT;
623 pr_debug("new stream status = %d\n", stream->status);
624 if (stream->need_draining == true) {
625 pr_debug("draining stream\n");
626 if (sst_create_short_msg(&msg)) {
627 pr_err("mem allocation failed\n");
628 return -ENOMEM;
629 }
630 sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
631 0, str_id);
632 spin_lock(&sst_drv_ctx->list_spin_lock);
633 list_add_tail(&msg->node,
634 &sst_drv_ctx->ipc_dispatch_list);
635 spin_unlock(&sst_drv_ctx->list_spin_lock);
636 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
637 } else if (stream->data_blk.on == true) {
638 pr_debug("user list empty.. wake\n");
639 /* unblock */
640 stream->data_blk.ret_code = 0;
641 stream->data_blk.condition = true;
642 stream->data_blk.on = false;
643 wake_up(&sst_drv_ctx->wait_queue);
644 }
645 return 0;
646 }
647
648 /* create list */
649 i = sst_create_sg_list(stream, &sg_list);
650
651 /* post msg */
652 if (sst_create_large_msg(&msg))
653 return -ENOMEM;
654
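	/*
	 * Mailbox layout: the 32-bit message header dword followed by the
	 * sst_frame_info scatter-gather list built above.
	 */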
655 sst_fill_header(&msg->header, IPC_IA_PLAY_FRAMES, 1, str_id);
656 msg->header.part.data = sizeof(u32) + sizeof(sg_list);
657 memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
658 memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
659 spin_lock(&sst_drv_ctx->list_spin_lock);
660 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
661 spin_unlock(&sst_drv_ctx->list_spin_lock);
662 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
663 return 0;
664
665 }
666
667 /**
668  * sst_capture_frame - Send msg for capturing stream frames
669 *
670 * @str_id: ID of stream
671 *
672 * This function is called to capture data from the firmware
673 */
674 int sst_capture_frame(int str_id)
675 {
676 int i = 0, retval = 0;
677 struct ipc_post *msg = NULL;
678 struct sst_frame_info sg_list = {0};
679 struct sst_stream_bufs *kbufs = NULL, *_kbufs;
680 struct stream_info *stream;
681
682
683 pr_debug("capture frame for %d\n", str_id);
684 retval = sst_validate_strid(str_id);
685 if (retval)
686 return retval;
687 stream = &sst_drv_ctx->streams[str_id];
688 /* clear prev sent buffers */
689 list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
690 if (kbufs->in_use == true) {
691 list_del(&kbufs->node);
692 kfree(kbufs);
693 pr_debug("del node\n");
694 }
695 }
696 if (list_empty(&stream->bufs)) {
697 /* no user buffer available */
698 		pr_debug("Null buffer, stream status %d\n",
699 stream->status);
700 stream->prev = stream->status;
701 stream->status = STREAM_INIT;
702 pr_debug("new stream status = %d\n",
703 stream->status);
704 if (stream->data_blk.on == true) {
705 pr_debug("user list empty.. wake\n");
706 /* unblock */
707 stream->data_blk.ret_code = 0;
708 stream->data_blk.condition = true;
709 stream->data_blk.on = false;
710 wake_up(&sst_drv_ctx->wait_queue);
711
712 }
713 return 0;
714 }
715 /* create new sg list */
716 i = sst_create_sg_list(stream, &sg_list);
717
718 /* post msg */
719 if (sst_create_large_msg(&msg))
720 return -ENOMEM;
721
722 sst_fill_header(&msg->header, IPC_IA_CAPT_FRAMES, 1, str_id);
723 msg->header.part.data = sizeof(u32) + sizeof(sg_list);
724 memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
725 memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
726 spin_lock(&sst_drv_ctx->list_spin_lock);
727 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
728 spin_unlock(&sst_drv_ctx->list_spin_lock);
729 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
730
731
732 	/* update bytes received */
733 stream->cumm_bytes += stream->curr_bytes;
734 stream->curr_bytes = 0;
735
736 pr_debug("Cum bytes = %d\n", stream->cumm_bytes);
737 return 0;
738 }
739
740 /* This function calculates the minimum size among the given input buffers */
741 static unsigned int calculate_min_size(struct snd_sst_buffs *bufs)
742 {
743 int i, min_val = bufs->buff_entry[0].size;
744 for (i = 1 ; i < bufs->entries; i++) {
745 if (bufs->buff_entry[i].size < min_val)
746 min_val = bufs->buff_entry[i].size;
747 }
748 pr_debug("min_val = %d\n", min_val);
749 return min_val;
750 }
751
752 static unsigned int calculate_max_size(struct snd_sst_buffs *bufs)
753 {
754 int i, max_val = bufs->buff_entry[0].size;
755 for (i = 1 ; i < bufs->entries; i++) {
756 if (bufs->buff_entry[i].size > max_val)
757 max_val = bufs->buff_entry[i].size;
758 }
759 pr_debug("max_val = %d\n", max_val);
760 return max_val;
761 }
762
763 /*This function is used to allocate input and output buffers to be sent to
764 the firmware that will take encoded data and return decoded data*/
765 static int sst_allocate_decode_buf(struct stream_info *str_info,
766 struct snd_sst_dbufs *dbufs,
767 unsigned int cum_input_given,
768 unsigned int cum_output_given)
769 {
770 #ifdef CONFIG_MRST_RAR_HANDLER
771 if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
772
773 if (dbufs->ibufs->type == SST_BUF_RAR &&
774 dbufs->obufs->type == SST_BUF_RAR) {
775 if (dbufs->ibufs->entries == dbufs->obufs->entries)
776 return 0;
777 else {
778 				pr_err("RAR entries don't match\n");
779 return -EINVAL;
780 }
781 } else
782 str_info->decode_osize = cum_output_given;
783 return 0;
784
785 }
786 #endif
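	/*
	 * Input buffer allocation falls back in three steps: the summed size
	 * of all input entries, then the largest single entry, then the
	 * smallest one; only the last failure is fatal.
	 */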
787 if (!str_info->decode_ibuf) {
788 pr_debug("no i/p buffers, trying full size\n");
789 str_info->decode_isize = cum_input_given;
790 str_info->decode_ibuf = kzalloc(str_info->decode_isize,
791 GFP_KERNEL);
792 str_info->idecode_alloc = str_info->decode_isize;
793 }
794 if (!str_info->decode_ibuf) {
795 pr_debug("buff alloc failed, try max size\n");
796 str_info->decode_isize = calculate_max_size(dbufs->ibufs);
797 str_info->decode_ibuf = kzalloc(
798 str_info->decode_isize, GFP_KERNEL);
799 str_info->idecode_alloc = str_info->decode_isize;
800 }
801 if (!str_info->decode_ibuf) {
802 pr_debug("buff alloc failed, try min size\n");
803 str_info->decode_isize = calculate_min_size(dbufs->ibufs);
804 str_info->decode_ibuf = kzalloc(str_info->decode_isize,
805 GFP_KERNEL);
806 if (!str_info->decode_ibuf) {
807 pr_err("mem allocation failed\n");
808 return -ENOMEM;
809 }
810 str_info->idecode_alloc = str_info->decode_isize;
811 }
812 str_info->decode_osize = cum_output_given;
813 if (str_info->decode_osize > sst_drv_ctx->mmap_len)
814 str_info->decode_osize = sst_drv_ctx->mmap_len;
815 return 0;
816 }
817
818 /*This function is used to send the message to firmware to decode the data*/
819 static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
820 struct snd_sst_decode_info *dec_info)
821 {
822 struct ipc_post *msg = NULL;
823 int retval = 0;
824
825 	pr_debug("SST DBG:sst_send_decode_mess called\n");
826
827 if (str_info->decode_ibuf_type == SST_BUF_RAR) {
828 #ifdef CONFIG_MRST_RAR_HANDLER
829 dec_info->frames_in.addr[0].addr =
830 (unsigned long)str_info->decode_ibuf;
831 dec_info->frames_in.addr[0].size =
832 str_info->decode_isize;
833 #endif
834
835 } else {
836 dec_info->frames_in.addr[0].addr = virt_to_phys((void *)
837 str_info->decode_ibuf);
838 dec_info->frames_in.addr[0].size = str_info->decode_isize;
839 }
840
841
842 if (str_info->decode_obuf_type == SST_BUF_RAR) {
843 #ifdef CONFIG_MRST_RAR_HANDLER
844 dec_info->frames_out.addr[0].addr =
845 (unsigned long)str_info->decode_obuf;
846 dec_info->frames_out.addr[0].size = str_info->decode_osize;
847 #endif
848
849 } else {
850 dec_info->frames_out.addr[0].addr = virt_to_phys((void *)
851 str_info->decode_obuf) ;
852 dec_info->frames_out.addr[0].size = str_info->decode_osize;
853 }
854
855 dec_info->frames_in.num_entries = 1;
856 dec_info->frames_out.num_entries = 1;
857 dec_info->frames_in.rsrvd = 0;
858 dec_info->frames_out.rsrvd = 0;
859 dec_info->input_bytes_consumed = 0;
860 dec_info->output_bytes_produced = 0;
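	/*
	 * dec_info (one input and one output scatter entry) travels in the
	 * mailbox after the header dword and is also attached to
	 * data_blk.data so the reply can report the bytes consumed and
	 * produced back to the caller.
	 */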
861 if (sst_create_large_msg(&msg)) {
862 pr_err("message creation failed\n");
863 return -ENOMEM;
864 }
865
866 sst_fill_header(&msg->header, IPC_IA_DECODE_FRAMES, 1, str_id);
867 msg->header.part.data = sizeof(u32) + sizeof(*dec_info);
868 memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
869 memcpy(msg->mailbox_data + sizeof(u32), dec_info,
870 sizeof(*dec_info));
871 spin_lock(&sst_drv_ctx->list_spin_lock);
872 list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
873 spin_unlock(&sst_drv_ctx->list_spin_lock);
874 str_info->data_blk.condition = false;
875 str_info->data_blk.ret_code = 0;
876 str_info->data_blk.on = true;
877 str_info->data_blk.data = dec_info;
878 sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
879 retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
880 return retval;
881 }
882
883 #ifdef CONFIG_MRST_RAR_HANDLER
884 static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
885 struct snd_sst_dbufs *dbufs,
886 int *input_index, int *in_copied,
887 int *input_index_valid_size, int *new_entry_flag)
888 {
889 int retval = 0;
890 int i;
891
892 if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
893 struct RAR_buffer rar_buffers;
894 __u32 info;
895 retval = copy_from_user((void *) &info,
896 dbufs->ibufs->buff_entry[i].buffer,
897 sizeof(__u32));
898 if (retval) {
899 pr_err("cpy from user fail\n");
900 return -EAGAIN;
901 }
902 rar_buffers.info.type = dbufs->ibufs->type;
903 rar_buffers.info.size = dbufs->ibufs->buff_entry[i].size;
904 rar_buffers.info.handle = info;
905 pr_debug("rar in DnR(input buffer function)=0x%x size=0x%x",
906 rar_buffers.info.handle,
907 rar_buffers.info.size);
908 retval = sst_get_RAR(&rar_buffers, 1);
909 if (retval) {
910 pr_debug("SST ERR: RAR API failed\n");
911 return retval;
912 }
913 str_info->decode_ibuf =
914 (void *) ((unsigned long) rar_buffers.bus_address);
915 		pr_debug("RAR buf addr in DnR (input buffer function) 0x%lx\n",
916 (unsigned long) str_info->decode_ibuf);
917 		pr_debug("rar in DnR decode function/output b_add rar =0x%lx\n",
918 (unsigned long) rar_buffers.bus_address);
919 *input_index = i + 1;
920 str_info->decode_isize = dbufs->ibufs->buff_entry[i].size;
921 str_info->decode_ibuf_type = dbufs->ibufs->type;
922 *in_copied = str_info->decode_isize;
923 }
924 return retval;
925 }
926 #endif
927
928 /*This function is used to prepare the kernel input buffers with contents
929 before sending for decode*/
930 static int sst_prepare_input_buffers(struct stream_info *str_info,
931 struct snd_sst_dbufs *dbufs,
932 int *input_index, int *in_copied,
933 int *input_index_valid_size, int *new_entry_flag)
934 {
935 int i, cpy_size, retval = 0;
936
937 pr_debug("input_index = %d, input entries = %d\n",
938 *input_index, dbufs->ibufs->entries);
939 for (i = *input_index; i < dbufs->ibufs->entries; i++) {
940 #ifdef CONFIG_MRST_RAR_HANDLER
941 retval = sst_prepare_input_buffers_rar(str_info,
942 dbufs, input_index, in_copied,
943 input_index_valid_size, new_entry_flag);
944 if (retval) {
945 pr_err("In prepare input buffers for RAR\n");
946 return -EIO;
947 }
948 #endif
949 *input_index = i;
950 if (*input_index_valid_size == 0)
951 *input_index_valid_size =
952 dbufs->ibufs->buff_entry[i].size;
953 		pr_debug("input addr = %p, size = %d\n",
954 dbufs->ibufs->buff_entry[i].buffer,
955 *input_index_valid_size);
956 pr_debug("decode_isize = %d, in_copied %d\n",
957 str_info->decode_isize, *in_copied);
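		/*
		 * Copy as much of the current user entry as still fits in the
		 * kernel decode input buffer; a partially consumed entry is
		 * left in place for the next pass.
		 */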
958 if (*input_index_valid_size <=
959 (str_info->decode_isize - *in_copied))
960 cpy_size = *input_index_valid_size;
961 else
962 cpy_size = str_info->decode_isize - *in_copied;
963
964 pr_debug("cpy size = %d\n", cpy_size);
965 if (!dbufs->ibufs->buff_entry[i].buffer) {
966 pr_err("i/p buffer is null\n");
967 return -EINVAL;
968 }
969 pr_debug("Try copy To %p, From %p, size %d\n",
970 str_info->decode_ibuf + *in_copied,
971 dbufs->ibufs->buff_entry[i].buffer, cpy_size);
972
973 retval =
974 copy_from_user((void *)(str_info->decode_ibuf + *in_copied),
975 (void *) dbufs->ibufs->buff_entry[i].buffer,
976 cpy_size);
977 if (retval) {
978 pr_err("copy from user failed\n");
979 return -EIO;
980 }
981 *in_copied += cpy_size;
982 *input_index_valid_size -= cpy_size;
983 pr_debug("in buff size = %d, in_copied = %d\n",
984 *input_index_valid_size, *in_copied);
985 if (*input_index_valid_size != 0) {
986 pr_debug("more input buffers left\n");
987 dbufs->ibufs->buff_entry[i].buffer += cpy_size;
988 break;
989 }
990 if (*in_copied == str_info->decode_isize &&
991 *input_index_valid_size == 0 &&
992 (i+1) <= dbufs->ibufs->entries) {
993 pr_debug("all input buffers copied\n");
994 *new_entry_flag = true;
995 *input_index = i + 1;
996 break;
997 }
998 }
999 return retval;
1000 }
1001
1002 /* This function copies the decoded data from kernel buffers to
1003 the user output buffers after decode */
1004 static int sst_prepare_output_buffers(struct stream_info *str_info,
1005 struct snd_sst_dbufs *dbufs,
1006 int *output_index, int output_size,
1007 int *out_copied)
1008
1009 {
1010 int i, cpy_size, retval = 0;
1011 pr_debug("output_index = %d, output entries = %d\n",
1012 *output_index,
1013 dbufs->obufs->entries);
1014 for (i = *output_index; i < dbufs->obufs->entries; i++) {
1015 *output_index = i;
1016 pr_debug("output addr = %p, size = %d\n",
1017 dbufs->obufs->buff_entry[i].buffer,
1018 dbufs->obufs->buff_entry[i].size);
1019 pr_debug("output_size = %d, out_copied = %d\n",
1020 output_size, *out_copied);
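		/*
		 * Copy decoded data from the mmap'ed output area into the
		 * current user entry, splitting across entries when a single
		 * entry cannot hold everything that was produced.
		 */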
1021 if (dbufs->obufs->buff_entry[i].size <
1022 (output_size - *out_copied))
1023 cpy_size = dbufs->obufs->buff_entry[i].size;
1024 else
1025 cpy_size = output_size - *out_copied;
1026 pr_debug("cpy size = %d\n", cpy_size);
1027 pr_debug("Try copy To: %p, From %p, size %d\n",
1028 dbufs->obufs->buff_entry[i].buffer,
1029 sst_drv_ctx->mmap_mem + *out_copied,
1030 cpy_size);
1031 retval = copy_to_user(dbufs->obufs->buff_entry[i].buffer,
1032 sst_drv_ctx->mmap_mem + *out_copied,
1033 cpy_size);
1034 if (retval) {
1035 pr_err("copy to user failed\n");
1036 return -EIO;
1037 } else
1038 pr_debug("copy to user passed\n");
1039 *out_copied += cpy_size;
1040 dbufs->obufs->buff_entry[i].size -= cpy_size;
1041 pr_debug("o/p buff size %d, out_copied %d\n",
1042 dbufs->obufs->buff_entry[i].size, *out_copied);
1043 if (dbufs->obufs->buff_entry[i].size != 0) {
1044 *output_index = i;
1045 dbufs->obufs->buff_entry[i].buffer += cpy_size;
1046 break;
1047 } else if (*out_copied == output_size) {
1048 *output_index = i + 1;
1049 break;
1050 }
1051 }
1052 return retval;
1053 }
1054
1055 /**
1056 * sst_decode - Send msg for decoding frames
1057 *
1058 * @str_id: ID of stream
1059 * @dbufs: param that holds the user input and output buffers and size
1060 *
1061 * This function is called to decode data from the firmware
1062 */
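/*
 * Overall flow: user input entries are staged into a kernel (or RAR) decode
 * input buffer, one IPC_IA_DECODE_FRAMES message is sent per loop iteration,
 * and the produced output is copied back to the user entries (or left in the
 * RAR output buffer for DRM), until all given input is consumed or all given
 * output space is filled.
 */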
1063 int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
1064 {
1065 int retval = 0, i;
1066 unsigned long long total_input = 0 , total_output = 0;
1067 unsigned int cum_input_given = 0 , cum_output_given = 0;
1068 int copy_in_done = false, copy_out_done = false;
1069 int input_index = 0, output_index = 0;
1070 int input_index_valid_size = 0;
1071 int in_copied, out_copied;
1072 int new_entry_flag;
1073 u64 output_size;
1074 struct stream_info *str_info;
1075 struct snd_sst_decode_info dec_info;
1076 unsigned long long input_bytes, output_bytes;
1077
1078 sst_drv_ctx->scard_ops->power_down_pmic();
1079 pr_debug("Powering_down_PMIC...\n");
1080
1081 retval = sst_validate_strid(str_id);
1082 if (retval)
1083 return retval;
1084
1085 str_info = &sst_drv_ctx->streams[str_id];
1086 if (str_info->status != STREAM_INIT) {
1087 pr_err("invalid stream state = %d\n",
1088 str_info->status);
1089 return -EINVAL;
1090 }
1091
1092 str_info->prev = str_info->status;
1093 str_info->status = STREAM_DECODE;
1094
1095 for (i = 0; i < dbufs->ibufs->entries; i++)
1096 cum_input_given += dbufs->ibufs->buff_entry[i].size;
1097 for (i = 0; i < dbufs->obufs->entries; i++)
1098 cum_output_given += dbufs->obufs->buff_entry[i].size;
1099
1100 /* input and output buffer allocation */
1101 retval = sst_allocate_decode_buf(str_info, dbufs,
1102 cum_input_given, cum_output_given);
1103 if (retval) {
1104 pr_err("mem allocation failed, abort!!!\n");
1105 retval = -ENOMEM;
1106 goto finish;
1107 }
1108
1109 str_info->decode_isize = str_info->idecode_alloc;
1110 str_info->decode_ibuf_type = dbufs->ibufs->type;
1111 str_info->decode_obuf_type = dbufs->obufs->type;
1112
1113 while ((copy_out_done == false) && (copy_in_done == false)) {
1114 in_copied = 0;
1115 new_entry_flag = false;
1116 		retval = sst_prepare_input_buffers(str_info,
1117 dbufs, &input_index, &in_copied,
1118 &input_index_valid_size, &new_entry_flag);
1119 if (retval) {
1120 pr_err("prepare in buffers failed\n");
1121 goto finish;
1122 }
1123
1124 if (str_info->ops != STREAM_OPS_PLAYBACK_DRM)
1125 str_info->decode_obuf = sst_drv_ctx->mmap_mem;
1126
1127 #ifdef CONFIG_MRST_RAR_HANDLER
1128 else {
1129 if (dbufs->obufs->type == SST_BUF_RAR) {
1130 struct RAR_buffer rar_buffers;
1131 __u32 info;
1132
1133 pr_debug("DRM");
1134 retval = copy_from_user((void *) &info,
1135 dbufs->obufs->
1136 buff_entry[output_index].buffer,
1137 sizeof(__u32));
1138
1139 rar_buffers.info.size = dbufs->obufs->
1140 buff_entry[output_index].size;
1141 rar_buffers.info.handle = info;
1142 retval = sst_get_RAR(&rar_buffers, 1);
1143 if (retval)
1144 return retval;
1145
1146 str_info->decode_obuf = (void *)((unsigned long)
1147 rar_buffers.bus_address);
1148 str_info->decode_osize = dbufs->obufs->
1149 buff_entry[output_index].size;
1150 str_info->decode_obuf_type = dbufs->obufs->type;
1151 pr_debug("DRM handling\n");
1152 				pr_debug("o/p_add=0x%lx Size=0x%x\n",
1153 (unsigned long) str_info->decode_obuf,
1154 str_info->decode_osize);
1155 } else {
1156 str_info->decode_obuf = sst_drv_ctx->mmap_mem;
1157 str_info->decode_osize = dbufs->obufs->
1158 buff_entry[output_index].size;
1159
1160 }
1161 }
1162 #endif
1163 if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
1164 if (str_info->decode_isize > in_copied) {
1165 str_info->decode_isize = in_copied;
1166 pr_debug("i/p size = %d\n",
1167 str_info->decode_isize);
1168 }
1169 }
1170
1171
1172 retval = sst_send_decode_mess(str_id, str_info, &dec_info);
1173 if (retval || dec_info.input_bytes_consumed == 0) {
1174 			pr_err("SST ERR: decode message failed or no input consumed\n");
1175 goto finish;
1176 }
1177 input_bytes = dec_info.input_bytes_consumed;
1178 output_bytes = dec_info.output_bytes_produced;
1179
1180 pr_debug("in_copied=%d, con=%lld, prod=%lld\n",
1181 in_copied, input_bytes, output_bytes);
1182 if (dbufs->obufs->type == SST_BUF_RAR) {
1183 output_index += 1;
1184 if (output_index == dbufs->obufs->entries) {
1185 				copy_out_done = true;
1186 				pr_debug("all o/p cpy done\n");
1187 }
1188 total_output += output_bytes;
1189 } else {
1190 out_copied = 0;
1191 output_size = output_bytes;
1192 retval = sst_prepare_output_buffers(str_info, dbufs,
1193 &output_index, output_size, &out_copied);
1194 if (retval) {
1195 pr_err("prep out buff fail\n");
1196 goto finish;
1197 }
1198 if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
1199 if (in_copied != input_bytes) {
1200 int bytes_left = in_copied -
1201 input_bytes;
1202 pr_debug("bytes %d\n",
1203 bytes_left);
1204 if (new_entry_flag == true)
1205 input_index--;
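				/*
				 * Rewind over the input entries so the tail
				 * the firmware did not consume is handed back
				 * to the user's buffer list for the next pass.
				 */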
1206 while (bytes_left) {
1207 struct snd_sst_buffs *ibufs;
1208 struct snd_sst_buff_entry
1209 *buff_entry;
1210 unsigned int size_sent;
1211
1212 ibufs = dbufs->ibufs;
1213 buff_entry =
1214 &ibufs->buff_entry[input_index];
1215 					size_sent = buff_entry->size -
1216 input_index_valid_size;
1217 if (bytes_left == size_sent) {
1218 bytes_left = 0;
1219 } else if (bytes_left <
1220 size_sent) {
1221 buff_entry->buffer +=
1222 (size_sent -
1223 bytes_left);
1224 buff_entry->size -=
1225 (size_sent -
1226 bytes_left);
1227 bytes_left = 0;
1228 } else {
1229 bytes_left -= size_sent;
1230 input_index--;
1231 input_index_valid_size =
1232 0;
1233 }
1234 }
1235
1236 }
1237 }
1238
1239 total_output += out_copied;
1240 if (str_info->decode_osize != out_copied) {
1241 str_info->decode_osize -= out_copied;
1242 pr_debug("output size modified = %d\n",
1243 str_info->decode_osize);
1244 }
1245 }
1246 total_input += input_bytes;
1247
1248 if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
1249 if (total_input == cum_input_given)
1250 copy_in_done = true;
1251 copy_out_done = true;
1252
1253 } else {
1254 if (total_output == cum_output_given) {
1255 copy_out_done = true;
1256 pr_debug("all o/p cpy done\n");
1257 }
1258
1259 if (total_input == cum_input_given) {
1260 copy_in_done = true;
1261 pr_debug("all i/p cpy done\n");
1262 }
1263 }
1264
1265 pr_debug("copy_out = %d, copy_in = %d\n",
1266 copy_out_done, copy_in_done);
1267 }
1268
1269 finish:
1270 dbufs->input_bytes_consumed = total_input;
1271 dbufs->output_bytes_produced = total_output;
1272 str_info->status = str_info->prev;
1273 str_info->prev = STREAM_DECODE;
1274 kfree(str_info->decode_ibuf);
1275 str_info->decode_ibuf = NULL;
1276 return retval;
1277 }