]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
[media] c8sectpfe: STiH407/10 Linux DVB demux support
[mirror_ubuntu-hirsute-kernel.git] / drivers / media / platform / sti / c8sectpfe / c8sectpfe-core.c
CommitLineData
c5f5d0f9
PG
1/*
2 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
3 *
4 * Copyright (c) STMicroelectronics 2015
5 *
6 * Author:Peter Bennett <peter.bennett@st.com>
7 * Peter Griffin <peter.griffin@linaro.org>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 */
14#include <linux/atomic.h>
15#include <linux/clk.h>
16#include <linux/completion.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dvb/dmx.h>
21#include <linux/dvb/frontend.h>
22#include <linux/errno.h>
23#include <linux/firmware.h>
24#include <linux/init.h>
25#include <linux/interrupt.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/of_gpio.h>
29#include <linux/of_platform.h>
30#include <linux/platform_device.h>
31#include <linux/usb.h>
32#include <linux/slab.h>
33#include <linux/time.h>
34#include <linux/version.h>
35#include <linux/wait.h>
36
37#include "c8sectpfe-core.h"
38#include "c8sectpfe-common.h"
39#include "c8sectpfe-debugfs.h"
40#include "dmxdev.h"
41#include "dvb_demux.h"
42#include "dvb_frontend.h"
43#include "dvb_net.h"
44
45#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
46MODULE_FIRMWARE(FIRMWARE_MEMDMA);
47
48#define PID_TABLE_SIZE 1024
49#define POLL_MSECS 50
50
51static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei);
52
53#define TS_PKT_SIZE 188
54#define HEADER_SIZE (4)
55#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
56
57#define FEI_ALIGNMENT (32)
58/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
59#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
60
61#define FIFO_LEN 1024
62
63static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
64{
65 struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
66 struct channel_info *channel;
67 int chan_num;
68
69 /* iterate through input block channels */
70 for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
71 channel = fei->channel_data[chan_num];
72
73 /* is this descriptor initialised and TP enabled */
74 if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
75 tasklet_schedule(&channel->tsklet);
76 }
77
78 fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
79 add_timer(&fei->timer);
80}
81
82static void channel_swdemux_tsklet(unsigned long data)
83{
84 struct channel_info *channel = (struct channel_info *)data;
85 struct c8sectpfei *fei = channel->fei;
86 unsigned long wp, rp;
87 int pos, num_packets, n, size;
88 u8 *buf;
89
90 if (unlikely(!channel || !channel->irec))
91 return;
92
93 wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
94 rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
95
96 pos = rp - channel->back_buffer_busaddr;
97
98 /* has it wrapped */
99 if (wp < rp)
100 wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
101
102 size = wp - rp;
103 num_packets = size / PACKET_SIZE;
104
105 /* manage cache so data is visible to CPU */
106 dma_sync_single_for_cpu(fei->dev,
107 rp,
108 size,
109 DMA_FROM_DEVICE);
110
111 buf = (u8 *) channel->back_buffer_aligned;
112
113 dev_dbg(fei->dev,
114 "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
115 "rp=0x%lx, wp=0x%lx\n",
116 channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
117
118 for (n = 0; n < num_packets; n++) {
119 dvb_dmx_swfilter_packets(
120 &fei->c8sectpfe[0]->
121 demux[channel->demux_mapping].dvb_demux,
122 &buf[pos], 1);
123
124 pos += PACKET_SIZE;
125 }
126
127 /* advance the read pointer */
128 if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
129 writel(channel->back_buffer_busaddr, channel->irec +
130 DMA_PRDS_BUSRP_TP(0));
131 else
132 writel(wp, channel->irec + DMA_PRDS_BUSWP_TP(0));
133}
134
135static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
136{
137 struct dvb_demux *demux = dvbdmxfeed->demux;
138 struct stdemux *stdemux = (struct stdemux *)demux->priv;
139 struct c8sectpfei *fei = stdemux->c8sectpfei;
140 struct channel_info *channel;
141 u32 tmp;
142 unsigned long *bitmap;
143
144 switch (dvbdmxfeed->type) {
145 case DMX_TYPE_TS:
146 break;
147 case DMX_TYPE_SEC:
148 break;
149 default:
150 dev_err(fei->dev, "%s:%d Error bailing\n"
151 , __func__, __LINE__);
152 return -EINVAL;
153 }
154
155 if (dvbdmxfeed->type == DMX_TYPE_TS) {
156 switch (dvbdmxfeed->pes_type) {
157 case DMX_PES_VIDEO:
158 case DMX_PES_AUDIO:
159 case DMX_PES_TELETEXT:
160 case DMX_PES_PCR:
161 case DMX_PES_OTHER:
162 break;
163 default:
164 dev_err(fei->dev, "%s:%d Error bailing\n"
165 , __func__, __LINE__);
166 return -EINVAL;
167 }
168 }
169
170 if (!atomic_read(&fei->fw_loaded)) {
171 dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
172 return -EINVAL;
173 }
174
175 mutex_lock(&fei->lock);
176
177 channel = fei->channel_data[stdemux->tsin_index];
178
179 bitmap = (unsigned long *) channel->pid_buffer_aligned;
180
181 /* 8192 is a special PID */
182 if (dvbdmxfeed->pid == 8192) {
183 tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
184 tmp &= ~C8SECTPFE_PID_ENABLE;
185 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
186
187 } else {
188 bitmap_set(bitmap, dvbdmxfeed->pid, 1);
189 }
190
191 /* manage cache so PID bitmap is visible to HW */
192 dma_sync_single_for_device(fei->dev,
193 channel->pid_buffer_busaddr,
194 PID_TABLE_SIZE,
195 DMA_TO_DEVICE);
196
197 channel->active = 1;
198
199 if (fei->global_feed_count == 0) {
200 fei->timer.expires = jiffies +
201 msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
202
203 add_timer(&fei->timer);
204 }
205
206 if (stdemux->running_feed_count == 0) {
207
208 dev_dbg(fei->dev, "Starting channel=%p\n", channel);
209
210 tasklet_init(&channel->tsklet, channel_swdemux_tsklet,
211 (unsigned long) channel);
212
213 /* Reset the internal inputblock sram pointers */
214 writel(channel->fifo,
215 fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
216 writel(channel->fifo + FIFO_LEN - 1,
217 fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
218
219 writel(channel->fifo,
220 fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
221 writel(channel->fifo,
222 fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
223
224
225 /* reset read / write memdma ptrs for this channel */
226 writel(channel->back_buffer_busaddr, channel->irec +
227 DMA_PRDS_BUSBASE_TP(0));
228
229 tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
230 writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
231
232 writel(channel->back_buffer_busaddr, channel->irec +
233 DMA_PRDS_BUSWP_TP(0));
234
235 /* Issue a reset and enable InputBlock */
236 writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
237 , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
238
239 /* and enable the tp */
240 writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
241
242 dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
243 , __func__, __LINE__, stdemux);
244 }
245
246 stdemux->running_feed_count++;
247 fei->global_feed_count++;
248
249 mutex_unlock(&fei->lock);
250
251 return 0;
252}
253
/*
 * c8sectpfe_stop_feed() - dvb_demux callback to stop a PID feed.
 *
 * Removes the PID from the channel's hardware filter bitmap (PID 8192
 * instead re-enables hw PID filtering), and when the last feed on this
 * demux stops, runs the TP re-configuration sequence from page 168 of
 * the functional spec: disable the input block and descriptor, request
 * memdma idle, reset the DMA pointers, and clear the PID table. The
 * polling timer is stopped when the last feed device-wide goes away.
 *
 * Returns 0 on success or -EINVAL if firmware is not loaded.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{

	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	if (!atomic_read(&fei->fw_loaded)) {
		dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = (unsigned long *) channel->pid_buffer_aligned;

	/* 8192 is the special whole-TS PID: turn hw filtering back on */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0, channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));

		/* best-effort: proceed with teardown even on timeout */
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset((void *)channel->pid_buffer_aligned
			, 0x00, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	/* last feed on the whole device: stop polling */
	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
359
360static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
361{
362 int i;
363
364 for (i = 0; i < C8SECTPFE_MAXCHANNEL; i++) {
365 if (!fei->channel_data[i])
366 continue;
367
368 if (fei->channel_data[i]->tsin_id == tsin_num)
369 return fei->channel_data[i];
370 }
371
372 return NULL;
373}
374
375static void c8sectpfe_getconfig(struct c8sectpfei *fei)
376{
377 struct c8sectpfe_hw *hw = &fei->hw_stats;
378
379 hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
380 hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
381 hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
382 hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
383 hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
384 hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
385 hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
386
387 dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
388 dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
389 dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
390 dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
391 , hw->num_swts);
392 dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
393 dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
394 dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
395 dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
396 , hw->num_tp);
397}
398
399static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
400{
401 struct c8sectpfei *fei = priv;
402 struct channel_info *chan;
403 int bit;
404 unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
405
406 /* page 168 of functional spec: Clear the idle request
407 by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
408
409 /* signal idle completion */
410 for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
411
412 chan = find_channel(fei, bit);
413
414 if (chan)
415 complete(&chan->idle_completion);
416 }
417
418 writel(0, fei->io + DMA_IDLE_REQ);
419
420 return IRQ_HANDLED;
421}
422
423
424static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
425{
426 if (!fei || !tsin)
427 return;
428
429 if (tsin->back_buffer_busaddr)
430 if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
431 dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
432 FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
433
434 kfree(tsin->back_buffer_start);
435
436 if (tsin->pid_buffer_busaddr)
437 if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
438 dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
439 PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
440
441 kfree(tsin->pid_buffer_start);
442}
443
444#define MAX_NAME 20
445
/*
 * configure_memdma_and_inputblock() - one-time hardware setup for a tsin.
 *
 * Allocates and DMA-maps the packet back buffer and the PID filter
 * bitmap, selects the tsin pinctrl state, then programs the input block
 * registers, the PID filter, and the memdma pointer record (irec) for
 * this channel.
 *
 * Returns 0 on success or a negative errno; on failure any partially
 * acquired buffers are released via free_input_block().
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate so the start can be rounded down to a 32B boundary */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE +
					FEI_ALIGNMENT, GFP_KERNEL);

	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start
		+ FEI_ALIGNMENT;

	tsin->back_buffer_aligned = (void *)
		(((uintptr_t) tsin->back_buffer_aligned) & ~0x1F);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					(void *)tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(2048, GFP_KERNEL);

	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start +
		PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = (void *)
		(((uintptr_t) tsin->pid_buffer_aligned) & ~0x3ff);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pinctrl state name, e.g. "tsin3-serial" or "tsin3-parallel" */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/* build the input format config word from the DT properties */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the TS sync byte (0x47) */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=0x%x\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base addpress of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to an 8-byte boundary (188 -> 192) */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_init(&tsin->tsklet, channel_swdemux_tsklet,
		(unsigned long) tsin);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
648
649static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
650{
651 struct c8sectpfei *fei = priv;
652
653 dev_err(fei->dev, "%s: error handling not yet implemented\n"
654 , __func__);
655
656 /*
657 * TODO FIXME we should detect some error conditions here
658 * and ideally so something about them!
659 */
660
661 return IRQ_HANDLED;
662}
663
664static int c8sectpfe_probe(struct platform_device *pdev)
665{
666 struct device *dev = &pdev->dev;
667 struct device_node *child, *np = dev->of_node;
668 struct c8sectpfei *fei;
669 struct resource *res;
670 int ret, index = 0;
671 struct channel_info *tsin;
672
673 /* Allocate the c8sectpfei structure */
674 fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
675 if (!fei)
676 return -ENOMEM;
677
678 fei->dev = dev;
679
680 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
681 fei->io = devm_ioremap_resource(dev, res);
682 if (IS_ERR(fei->io))
683 return PTR_ERR(fei->io);
684
685 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
686 "c8sectpfe-ram");
687 fei->sram = devm_ioremap_resource(dev, res);
688 if (IS_ERR(fei->sram))
689 return PTR_ERR(fei->sram);
690
691 fei->sram_size = res->end - res->start;
692
693 fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
694 if (fei->idle_irq < 0) {
695 dev_err(dev, "Can't get c8sectpfe-idle-irq\n");
696 return fei->idle_irq;
697 }
698
699 fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
700 if (fei->error_irq < 0) {
701 dev_err(dev, "Can't get c8sectpfe-error-irq\n");
702 return fei->error_irq;
703 }
704
705 platform_set_drvdata(pdev, fei);
706
707 fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
708 if (IS_ERR(fei->c8sectpfeclk)) {
709 dev_err(dev, "c8sectpfe clk not found\n");
710 return PTR_ERR(fei->c8sectpfeclk);
711 }
712
713 ret = clk_prepare_enable(fei->c8sectpfeclk);
714 if (ret) {
715 dev_err(dev, "Failed to enable c8sectpfe clock\n");
716 return ret;
717 }
718
719 /* to save power disable all IP's (on by default) */
720 writel(0, fei->io + SYS_INPUT_CLKEN);
721
722 /* Enable memdma clock */
723 writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
724
725 /* clear internal sram */
726 memset_io(fei->sram, 0x0, fei->sram_size);
727
728 c8sectpfe_getconfig(fei);
729
730 ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
731 0, "c8sectpfe-idle-irq", fei);
732 if (ret) {
733 dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
734 goto err_clk_disable;
735 }
736
737 ret = devm_request_irq(dev, fei->error_irq,
738 c8sectpfe_error_irq_handler, 0,
739 "c8sectpfe-error-irq", fei);
740 if (ret) {
741 dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
742 goto err_clk_disable;
743 }
744
745 fei->tsin_count = of_get_child_count(np);
746
747 if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
748 fei->tsin_count > fei->hw_stats.num_ib) {
749
750 dev_err(dev, "More tsin declared than exist on SoC!\n");
751 ret = -EINVAL;
752 goto err_clk_disable;
753 }
754
755 fei->pinctrl = devm_pinctrl_get(dev);
756
757 if (IS_ERR(fei->pinctrl)) {
758 dev_err(dev, "Error getting tsin pins\n");
759 ret = PTR_ERR(fei->pinctrl);
760 goto err_clk_disable;
761 }
762
763 for_each_child_of_node(np, child) {
764 struct device_node *i2c_bus;
765
766 fei->channel_data[index] = devm_kzalloc(dev,
767 sizeof(struct channel_info),
768 GFP_KERNEL);
769
770 if (!fei->channel_data[index]) {
771 ret = -ENOMEM;
772 goto err_clk_disable;
773 }
774
775 tsin = fei->channel_data[index];
776
777 tsin->fei = fei;
778
779 ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
780 if (ret) {
781 dev_err(&pdev->dev, "No tsin_num found\n");
782 goto err_clk_disable;
783 }
784
785 /* sanity check value */
786 if (tsin->tsin_id > fei->hw_stats.num_ib) {
787 dev_err(&pdev->dev,
788 "tsin-num %d specified greater than number\n\t"
789 "of input block hw in SoC! (%d)",
790 tsin->tsin_id, fei->hw_stats.num_ib);
791 ret = -EINVAL;
792 goto err_clk_disable;
793 }
794
795 tsin->invert_ts_clk = of_property_read_bool(child,
796 "invert-ts-clk");
797
798 tsin->serial_not_parallel = of_property_read_bool(child,
799 "serial-not-parallel");
800
801 tsin->async_not_sync = of_property_read_bool(child,
802 "async-not-sync");
803
804 ret = of_property_read_u32(child, "dvb-card",
805 &tsin->dvb_card);
806 if (ret) {
807 dev_err(&pdev->dev, "No dvb-card found\n");
808 goto err_clk_disable;
809 }
810
811 i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
812 if (!i2c_bus) {
813 dev_err(&pdev->dev, "No i2c-bus found\n");
814 goto err_clk_disable;
815 }
816 tsin->i2c_adapter =
817 of_find_i2c_adapter_by_node(i2c_bus);
818 if (!tsin->i2c_adapter) {
819 dev_err(&pdev->dev, "No i2c adapter found\n");
820 of_node_put(i2c_bus);
821 goto err_clk_disable;
822 }
823 of_node_put(i2c_bus);
824
825 tsin->rst_gpio = of_get_named_gpio(child, "rst-gpio", 0);
826
827 ret = gpio_is_valid(tsin->rst_gpio);
828 if (!ret) {
829 dev_err(dev,
830 "reset gpio for tsin%d not valid (gpio=%d)\n",
831 tsin->tsin_id, tsin->rst_gpio);
832 goto err_clk_disable;
833 }
834
835 ret = devm_gpio_request_one(dev, tsin->rst_gpio,
836 GPIOF_OUT_INIT_LOW, "NIM reset");
837 if (ret && ret != -EBUSY) {
838 dev_err(dev, "Can't request tsin%d reset gpio\n"
839 , fei->channel_data[index]->tsin_id);
840 goto err_clk_disable;
841 }
842
843 if (!ret) {
844 /* toggle reset lines */
845 gpio_direction_output(tsin->rst_gpio, 0);
846 usleep_range(3500, 5000);
847 gpio_direction_output(tsin->rst_gpio, 1);
848 usleep_range(3000, 5000);
849 }
850
851 tsin->demux_mapping = index;
852
853 dev_dbg(fei->dev,
854 "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
855 "serial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
856 fei->channel_data[index], index,
857 tsin->tsin_id, tsin->invert_ts_clk,
858 tsin->serial_not_parallel, tsin->async_not_sync,
859 tsin->dvb_card);
860
861 index++;
862 }
863
864 /* Setup timer interrupt */
865 init_timer(&fei->timer);
866 fei->timer.function = c8sectpfe_timer_interrupt;
867 fei->timer.data = (unsigned long)fei;
868
869 mutex_init(&fei->lock);
870
871 /* Get the configuration information about the tuners */
872 ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
873 (void *)fei,
874 c8sectpfe_start_feed,
875 c8sectpfe_stop_feed);
876 if (ret) {
877 dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
878 ret);
879 goto err_clk_disable;
880 }
881
882 /* ensure all other init has been done before requesting firmware */
883 ret = load_c8sectpfe_fw_step1(fei);
884 if (ret) {
885 dev_err(dev, "Couldn't load slim core firmware\n");
886 goto err_clk_disable;
887 }
888
889 c8sectpfe_debugfs_init(fei);
890
891 return 0;
892
893err_clk_disable:
894 /* TODO uncomment when upstream has taken a reference on this clk */
895 /*clk_disable_unprepare(fei->c8sectpfeclk);*/
896 return ret;
897}
898
/*
 * c8sectpfe_remove() - platform driver remove.
 *
 * Waits for the asynchronous firmware load to finish (fw_ack is
 * completed by load_c8sectpfe_fw_cb on both success and failure),
 * unregisters the frontends, frees each input block's DMA buffers,
 * tears down debugfs, stops the SLIM core and gates the internal
 * clocks. Always returns 0.
 */
static int c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	/* don't tear down while the firmware callback may still run */
	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0, fei->io + DMA_CPU_RUN);

	/* unclock all internal IP's */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	/* TODO uncomment when upstream has taken a reference on this clk */
	/*
	if (fei->c8sectpfeclk)
		clk_disable_unprepare(fei->c8sectpfeclk);
	*/

	return 0;
}
938
939
940static int configure_channels(struct c8sectpfei *fei)
941{
942 int index = 0, ret;
943 struct channel_info *tsin;
944 struct device_node *child, *np = fei->dev->of_node;
945
946 /* iterate round each tsin and configure memdma descriptor and IB hw */
947 for_each_child_of_node(np, child) {
948
949 tsin = fei->channel_data[index];
950
951 ret = configure_memdma_and_inputblock(fei,
952 fei->channel_data[index]);
953
954 if (ret) {
955 dev_err(fei->dev,
956 "configure_memdma_and_inputblock failed\n");
957 goto err_unmap;
958 }
959 index++;
960 }
961
962 return 0;
963
964err_unmap:
965 for (index = 0; index < fei->tsin_count; index++) {
966 tsin = fei->channel_data[index];
967 free_input_block(fei, tsin);
968 }
969 return ret;
970}
971
972static int
973c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
974{
975 struct elf32_hdr *ehdr;
976 char class;
977
978 if (!fw) {
979 dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
980 return -EINVAL;
981 }
982
983 if (fw->size < sizeof(struct elf32_hdr)) {
984 dev_err(fei->dev, "Image is too small\n");
985 return -EINVAL;
986 }
987
988 ehdr = (struct elf32_hdr *)fw->data;
989
990 /* We only support ELF32 at this point */
991 class = ehdr->e_ident[EI_CLASS];
992 if (class != ELFCLASS32) {
993 dev_err(fei->dev, "Unsupported class: %d\n", class);
994 return -EINVAL;
995 }
996
997 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
998 dev_err(fei->dev, "Unsupported firmware endianness\n");
999 return -EINVAL;
1000 }
1001
1002 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
1003 dev_err(fei->dev, "Image is too small\n");
1004 return -EINVAL;
1005 }
1006
1007 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
1008 dev_err(fei->dev, "Image is corrupted (bad magic)\n");
1009 return -EINVAL;
1010 }
1011
1012 /* Check ELF magic */
1013 ehdr = (Elf32_Ehdr *)fw->data;
1014 if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
1015 ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
1016 ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
1017 ehdr->e_ident[EI_MAG3] != ELFMAG3) {
1018 dev_err(fei->dev, "Invalid ELF magic\n");
1019 return -EINVAL;
1020 }
1021
1022 if (ehdr->e_type != ET_EXEC) {
1023 dev_err(fei->dev, "Unsupported ELF header type\n");
1024 return -EINVAL;
1025 }
1026
1027 if (ehdr->e_phoff > fw->size) {
1028 dev_err(fei->dev, "Firmware size is too small\n");
1029 return -EINVAL;
1030 }
1031
1032 return 0;
1033}
1034
1035
/*
 * load_imem_segment() - copy an executable ELF segment into MEMDMA IMEM.
 *
 * @fei:     driver context (used for logging)
 * @phdr:    program header of the segment being loaded
 * @fw:      firmware image the segment data comes from
 * @dest:    iomem destination inside IMEM
 * @seg_num: segment index, for the debug message only
 */
static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
				const struct firmware *fw, u8 __iomem *dest,
				int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t"
		" (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
		phdr->p_paddr, phdr->p_filesz,
		dest, phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/*
		 * NOTE(review): imem_src points into ordinary firmware
		 * memory, yet is read via readb() with an __iomem cast —
		 * presumably to pair with the writeb(); verify this is
		 * intentional rather than a plain dereference.
		 */
		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* Every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}
1071
/*
 * load_dmem_segment() - copy a data ELF segment into MEMDMA DMEM.
 *
 * @fei:     driver context (used for logging)
 * @phdr:    program header of the segment being loaded
 * @fw:      firmware image the segment data comes from
 * @dst:     iomem destination inside DMEM
 * @seg_num: segment index, for the debug message only
 */
static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dst, int seg_num)
{
	/*
	 * For DMEM segments copy the segment data from the ELF
	 * file and pad segment with zeroes
	 */

	dev_dbg(fei->dev,
		"Loading DMEM segment %d 0x%08x\n\t"
		"(0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz,
		dst, phdr->p_memsz);

	/*
	 * NOTE(review): memcpy/memset onto an __iomem pointer — works on
	 * this platform but is not the portable memcpy_toio() idiom;
	 * confirm against the target architecture.
	 */
	memcpy((void __iomem *)dst, (void *)fw->data + phdr->p_offset,
		phdr->p_filesz);

	/* zero-fill the bss portion (p_memsz beyond p_filesz) */
	memset((void __iomem *)dst + phdr->p_filesz, 0,
		phdr->p_memsz - phdr->p_filesz);
}
1092
1093static int load_slim_core_fw(const struct firmware *fw, void *context)
1094{
1095 struct c8sectpfei *fei = context;
1096 Elf32_Ehdr *ehdr;
1097 Elf32_Phdr *phdr;
1098 u8 __iomem *dst;
1099 int err, i;
1100
1101 if (!fw || !context)
1102 return -EINVAL;
1103
1104 ehdr = (Elf32_Ehdr *)fw->data;
1105 phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
1106
1107 /* go through the available ELF segments */
1108 for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) {
1109
1110 /* Only consider LOAD segments */
1111 if (phdr->p_type != PT_LOAD)
1112 continue;
1113
1114 /*
1115 * Check segment is contained within the fw->data buffer
1116 */
1117 if (phdr->p_offset + phdr->p_filesz > fw->size) {
1118 dev_err(fei->dev,
1119 "Segment %d is outside of firmware file\n", i);
1120 err = -EINVAL;
1121 break;
1122 }
1123
1124 /*
1125 * MEMDMA IMEM has executable flag set, otherwise load
1126 * this segment into DMEM.
1127 *
1128 */
1129
1130 if (phdr->p_flags & PF_X) {
1131 dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
1132 /*
1133 * The Slim ELF file uses 32-bit word addressing for
1134 * load offsets.
1135 */
1136 dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1137 load_imem_segment(fei, phdr, fw, dst, i);
1138 } else {
1139 dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
1140 /*
1141 * The Slim ELF file uses 32-bit word addressing for
1142 * load offsets.
1143 */
1144 dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1145 load_dmem_segment(fei, phdr, fw, dst, i);
1146 }
1147 }
1148
1149 release_firmware(fw);
1150 return err;
1151}
1152
/*
 * load_c8sectpfe_fw_cb() - async firmware-load completion callback.
 *
 * Sanity-checks and loads the SLIM core image, configures the input
 * block channels, then starts the memdma core and marks the firmware
 * as loaded. Always completes fw_ack so c8sectpfe_remove() cannot
 * block forever.
 *
 * NOTE(review): on the sanity-check failure path the firmware is never
 * released (release_firmware() only happens inside load_slim_core_fw())
 * — looks like a leak; verify.
 */
static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
{
	struct c8sectpfei *fei = context;
	int err;

	err = c8sectpfe_elf_sanity_check(fei, fw);
	if (err) {
		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
			, err);
		goto err;
	}

	/* consumes (releases) fw in all cases */
	err = load_slim_core_fw(fw, context);
	if (err) {
		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
		goto err;
	}

	/* now the firmware is loaded configure the input blocks */
	err = configure_channels(fei);
	if (err) {
		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
		goto err;
	}

	/*
	 * STBus target port can access IMEM and DMEM ports
	 * without waiting for CPU
	 */
	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);

	dev_info(fei->dev, "Boot the memdma SLIM core\n");
	writel(0x1, fei->io + DMA_CPU_RUN);

	/* only set after the core is running: gates start/stop_feed */
	atomic_set(&fei->fw_loaded, 1);
err:
	complete_all(&fei->fw_ack);
}
1191
1192static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
1193{
1194 int ret;
1195 int err;
1196
1197 dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
1198
1199 init_completion(&fei->fw_ack);
1200 atomic_set(&fei->fw_loaded, 0);
1201
1202 err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
1203 FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei,
1204 load_c8sectpfe_fw_cb);
1205
1206 if (err) {
1207 dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
1208 complete_all(&fei->fw_ack);
1209 return ret;
1210 }
1211
1212 return 0;
1213}
1214
/* Device-tree match table: binds to the STiH407-family c8sectpfe node */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);
1220
/* Platform driver glue: probe/remove wired to the handlers above */
static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe	= c8sectpfe_probe,
	.remove	= c8sectpfe_remove,
};
1229
1230module_platform_driver(c8sectpfe_driver);
1231
1232MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
1233MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
1234MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
1235MODULE_LICENSE("GPL");