1 /*
2 * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
3 *
4 * Copyright (c) 2005 Axis Communications AB
5 *
6 * Author: Mikael Starvik
7 *
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/major.h>
15 #include <linux/sched.h>
16 #include <linux/smp_lock.h>
17 #include <linux/interrupt.h>
18 #include <linux/poll.h>
19 #include <linux/init.h>
20 #include <linux/timer.h>
21 #include <linux/spinlock.h>
#include <linux/uaccess.h>	/* copy_to_user()/copy_from_user(); otherwise only pulled in indirectly */
22
23 #include <asm/io.h>
24 #include <dma.h>
25 #include <pinmux.h>
26 #include <hwregs/reg_rdwr.h>
27 #include <hwregs/sser_defs.h>
28 #include <hwregs/dma_defs.h>
29 #include <hwregs/dma.h>
30 #include <hwregs/intr_vect_defs.h>
31 #include <hwregs/intr_vect.h>
32 #include <hwregs/reg_map.h>
33 #include <asm/sync_serial.h>
34
35
36 /* The receiver is a bit tricky because of the continuous stream of data.*/
37 /* */
38 /* NBR_IN_DESCR DMA descriptors are linked together in a ring (three are  */
39 /* shown below). Each covers port->inbufchunk bytes of a common buffer.   */
40 /* */
41 /* +---------------------------------------------+ */
42 /* | +----------+ +----------+ +----------+ | */
43 /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
44 /* +----------+ +----------+ +----------+ */
45 /* | | | */
46 /* v v v */
47 /* +-------------------------------------+ */
48 /* | BUFFER | */
49 /* +-------------------------------------+ */
50 /* |<- data_avail ->| */
51 /* readp writep */
52 /* */
53 /* If the application keeps up the pace readp will be right after writep.*/
54 /* If the application can't keep the pace we have to throw away data. */
55 /* The idea is that readp should be ready with the data pointed out by */
56 /* Descr[i] when the DMA has filled in Descr[i+1]. */
57 /* Otherwise we will discard                                              */
58 /* the rest of the data pointed out by Descr[i] and set readp to the start*/
59 /* of Descr[i+1].                                                         */
60
61 #define SYNC_SERIAL_MAJOR 125
62
63 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
64 /* words can be handled */
65 #define IN_BUFFER_SIZE 12288
66 #define IN_DESCR_SIZE 256
67 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
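/* With the sizes above this yields IN_BUFFER_SIZE/IN_DESCR_SIZE = 48 input
 * descriptors of 256 bytes each; start_dma_in() links them into a ring. */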
68
69 #define OUT_BUFFER_SIZE (1024*8)
70 #define NBR_OUT_DESCR 8
71
72 #define DEFAULT_FRAME_RATE 0
73 #define DEFAULT_WORD_RATE 7
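/* Note: the sser sample-size register fields are programmed as (bits - 1);
 * e.g. the WORD_SIZE_8 ioctl flag maps to sample_size = 7 below and
 * WORD_SIZE_32 to 31.  The default word rate of 7 presumably encodes 8-bit
 * words the same way (assumption based on that encoding). */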
74
75 /* NOTE: Enabling some debug will likely cause overrun or underrun,
76 * especially if manual mode is used.
77 */
78 #define DEBUG(x)
79 #define DEBUGREAD(x)
80 #define DEBUGWRITE(x)
81 #define DEBUGPOLL(x)
82 #define DEBUGRXINT(x)
83 #define DEBUGTXINT(x)
84 #define DEBUGTRDMA(x)
85 #define DEBUGOUTBUF(x)
86
87 typedef struct sync_port
88 {
89 reg_scope_instances regi_sser;
90 reg_scope_instances regi_dmain;
91 reg_scope_instances regi_dmaout;
92
93 char started; /* 1 if port has been started */
94 char port_nbr; /* Port 0 or 1 */
95 char busy; /* 1 if port is busy */
96
97 char enabled; /* 1 if port is enabled */
98 char use_dma; /* 1 if port uses dma */
99 char tr_running; /* 1 while a DMA transmit is in progress */
100
101 char init_irqs; /* 1 until IRQs/DMA channels have been requested (first open) */
102 int output; /* 1 if the transmit direction is enabled */
103 int input; /* 1 if the receive direction is enabled */
104
105 /* Next byte to be read by application */
106 volatile unsigned char *volatile readp;
107 /* Next byte to be written by etrax */
108 volatile unsigned char *volatile writep;
109
110 unsigned int in_buffer_size;
111 unsigned int inbufchunk;
112 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
113 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
114 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
115 struct dma_descr_data* next_rx_desc;
116 struct dma_descr_data* prev_rx_desc;
117
118 /* Pointer to the first available descriptor in the ring,
119 * unless active_tr_descr == catch_tr_descr and a dma
120 * transfer is active */
121 struct dma_descr_data *active_tr_descr;
122
123 /* Pointer to the first allocated descriptor in the ring */
124 struct dma_descr_data *catch_tr_descr;
125
126 /* Pointer to the descriptor with the current end-of-list */
127 struct dma_descr_data *prev_tr_descr;
128 int full; /* 1 if the input flip buffer is full (writep has caught up with readp) */
129
130 /* Pointer to the first byte being read by DMA
131 * or current position in out_buffer if not using DMA. */
132 unsigned char *out_rd_ptr;
133
134 /* Number of bytes currently locked for being read by DMA */
135 int out_buf_count;
136
137 dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
138 dma_descr_context in_context __attribute__ ((__aligned__(32)));
139 dma_descr_data out_descr[NBR_OUT_DESCR]
140 __attribute__ ((__aligned__(16)));
141 dma_descr_context out_context __attribute__ ((__aligned__(32)));
142 wait_queue_head_t out_wait_q;
143 wait_queue_head_t in_wait_q;
144
145 spinlock_t lock;
146 } sync_port;
147
148 static int etrax_sync_serial_init(void);
149 static void initialize_port(int portnbr);
150 static inline int sync_data_avail(struct sync_port *port);
151
152 static int sync_serial_open(struct inode *, struct file*);
153 static int sync_serial_release(struct inode*, struct file*);
154 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
155
156 static int sync_serial_ioctl(struct inode*, struct file*,
157 unsigned int cmd, unsigned long arg);
158 static ssize_t sync_serial_write(struct file * file, const char * buf,
159 size_t count, loff_t *ppos);
160 static ssize_t sync_serial_read(struct file *file, char *buf,
161 size_t count, loff_t *ppos);
162
163 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
164 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
165 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
166 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
167 #define SYNC_SER_DMA
168 #endif
169
170 static void send_word(sync_port* port);
171 static void start_dma_out(struct sync_port *port, const char *data, int count);
172 static void start_dma_in(sync_port* port);
173 #ifdef SYNC_SER_DMA
174 static irqreturn_t tr_interrupt(int irq, void *dev_id);
175 static irqreturn_t rx_interrupt(int irq, void *dev_id);
176 #endif
177
178 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
179 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
180 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
181 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
182 #define SYNC_SER_MANUAL
183 #endif
184 #ifdef SYNC_SER_MANUAL
185 static irqreturn_t manual_interrupt(int irq, void *dev_id);
186 #endif
187
188 #ifdef CONFIG_ETRAXFS /* ETRAX FS */
189 #define OUT_DMA_NBR 4
190 #define IN_DMA_NBR 5
191 #define PINMUX_SSER pinmux_sser0
192 #define SYNCSER_INST regi_sser0
193 #define SYNCSER_INTR_VECT SSER0_INTR_VECT
194 #define OUT_DMA_INST regi_dma4
195 #define IN_DMA_INST regi_dma5
196 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
197 #define DMA_IN_INTR_VECT DMA5_INTR_VECT
198 #define REQ_DMA_SYNCSER dma_sser0
199 #else /* Artpec-3 */
200 #define OUT_DMA_NBR 6
201 #define IN_DMA_NBR 7
202 #define PINMUX_SSER pinmux_sser
203 #define SYNCSER_INST regi_sser
204 #define SYNCSER_INTR_VECT SSER_INTR_VECT
205 #define OUT_DMA_INST regi_dma6
206 #define IN_DMA_INST regi_dma7
207 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
208 #define DMA_IN_INTR_VECT DMA7_INTR_VECT
209 #define REQ_DMA_SYNCSER dma_sser
210 #endif
211
212 /* The ports */
213 static struct sync_port ports[]=
214 {
215 {
216 .regi_sser = SYNCSER_INST,
217 .regi_dmaout = OUT_DMA_INST,
218 .regi_dmain = IN_DMA_INST,
219 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
220 .use_dma = 1,
221 #else
222 .use_dma = 0,
223 #endif
224 }
225 #ifdef CONFIG_ETRAXFS
226 ,
227
228 {
229 .regi_sser = regi_sser1,
230 .regi_dmaout = regi_dma6,
231 .regi_dmain = regi_dma7,
232 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
233 .use_dma = 1,
234 #else
235 .use_dma = 0,
236 #endif
237 }
238 #endif
239 };
240
241 #define NBR_PORTS ARRAY_SIZE(ports)
242
243 static const struct file_operations sync_serial_fops = {
244 .owner = THIS_MODULE,
245 .write = sync_serial_write,
246 .read = sync_serial_read,
247 .poll = sync_serial_poll,
248 .ioctl = sync_serial_ioctl,
249 .open = sync_serial_open,
250 .release = sync_serial_release
251 };
252
253 static int __init etrax_sync_serial_init(void)
254 {
255 ports[0].enabled = 0;
256 #ifdef CONFIG_ETRAXFS
257 ports[1].enabled = 0;
258 #endif
259 if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
260 &sync_serial_fops) < 0) {
261 printk(KERN_WARNING
262 "Unable to get major for synchronous serial port\n");
263 return -EBUSY;
264 }
265
266 /* Initialize Ports */
267 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
268 if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
269 printk(KERN_WARNING
270 "Unable to alloc pins for synchronous serial port 0\n");
271 return -EIO;
272 }
273 ports[0].enabled = 1;
274 initialize_port(0);
275 #endif
276
277 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
278 if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
279 printk(KERN_WARNING
280 "Unable to alloc pins for synchronous serial port 0\n");
281 return -EIO;
282 }
283 ports[1].enabled = 1;
284 initialize_port(1);
285 #endif
286
287 #ifdef CONFIG_ETRAXFS
288 printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
289 #else
290 printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
291 #endif
292 return 0;
293 }
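/*
 * Illustrative user-space usage sketch (not part of the driver).  The device
 * node name is hypothetical, and the exact ioctl argument encoding should be
 * taken from <asm/sync_serial.h> (see the GET_SPEED()/GET_FREQ()/
 * GET_WORD_RATE() packing used by SSP_SPEED); this only shows one possible
 * call sequence against the char device registered above:
 *
 *	int fd = open("/dev/syncser0", O_RDWR);      (char major 125, minor 0)
 *	ioctl(fd, SSP_MODE, MASTER_OUTPUT);
 *	ioctl(fd, SSP_FRAME_SYNC, NORMAL_SYNC | WORD_SYNC | WORD_SIZE_8 |
 *				  BIT_ORDER_MSB | SYNC_ON);
 *	write(fd, buf, len);                         (may return a short count)
 */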
294
295 static void __init initialize_port(int portnbr)
296 {
297 int __attribute__((unused)) i;
298 struct sync_port *port = &ports[portnbr];
299 reg_sser_rw_cfg cfg = {0};
300 reg_sser_rw_frm_cfg frm_cfg = {0};
301 reg_sser_rw_tr_cfg tr_cfg = {0};
302 reg_sser_rw_rec_cfg rec_cfg = {0};
303
304 DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
305
306 port->port_nbr = portnbr;
307 port->init_irqs = 1;
308
309 port->out_rd_ptr = port->out_buffer;
310 port->out_buf_count = 0;
311
312 port->output = 1;
313 port->input = 0;
314
315 port->readp = port->flip;
316 port->writep = port->flip;
317 port->in_buffer_size = IN_BUFFER_SIZE;
318 port->inbufchunk = IN_DESCR_SIZE;
319 port->next_rx_desc = &port->in_descr[0];
320 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
321 port->prev_rx_desc->eol = 1;
322
323 init_waitqueue_head(&port->out_wait_q);
324 init_waitqueue_head(&port->in_wait_q);
325
326 spin_lock_init(&port->lock);
327
328 cfg.out_clk_src = regk_sser_intern_clk;
329 cfg.out_clk_pol = regk_sser_pos;
330 cfg.clk_od_mode = regk_sser_no;
331 cfg.clk_dir = regk_sser_out;
332 cfg.gate_clk = regk_sser_no;
333 cfg.base_freq = regk_sser_f29_493;
334 cfg.clk_div = 256;
335 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
336
337 frm_cfg.wordrate = DEFAULT_WORD_RATE;
338 frm_cfg.type = regk_sser_edge;
339 frm_cfg.frame_pin_dir = regk_sser_out;
340 frm_cfg.frame_pin_use = regk_sser_frm;
341 frm_cfg.status_pin_dir = regk_sser_in;
342 frm_cfg.status_pin_use = regk_sser_hold;
343 frm_cfg.out_on = regk_sser_tr;
344 frm_cfg.tr_delay = 1;
345 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
346
347 tr_cfg.urun_stop = regk_sser_no;
348 tr_cfg.sample_size = 7;
349 tr_cfg.sh_dir = regk_sser_msbfirst;
350 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
351 #if 0
352 tr_cfg.rate_ctrl = regk_sser_bulk;
353 tr_cfg.data_pin_use = regk_sser_dout;
354 #else
355 tr_cfg.rate_ctrl = regk_sser_iso;
356 tr_cfg.data_pin_use = regk_sser_dout;
357 #endif
358 tr_cfg.bulk_wspace = 1;
359 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
360
361 rec_cfg.sample_size = 7;
362 rec_cfg.sh_dir = regk_sser_msbfirst;
363 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
364 rec_cfg.fifo_thr = regk_sser_inf;
365 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
366
367 #ifdef SYNC_SER_DMA
368 /* Setup the descriptor ring for dma out/transmit. */
369 for (i = 0; i < NBR_OUT_DESCR; i++) {
370 port->out_descr[i].wait = 0;
371 port->out_descr[i].intr = 1;
372 port->out_descr[i].eol = 0;
373 port->out_descr[i].out_eop = 0;
374 port->out_descr[i].next =
375 (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
376 }
377
378 /* Create a ring from the list. */
379 port->out_descr[NBR_OUT_DESCR-1].next =
380 (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
381
382 /* Setup context for traversing the ring. */
383 port->active_tr_descr = &port->out_descr[0];
384 port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
385 port->catch_tr_descr = &port->out_descr[0];
386 #endif
387 }
388
389 static inline int sync_data_avail(struct sync_port *port)
390 {
391 int avail;
392 unsigned char *start;
393 unsigned char *end;
394
395 start = (unsigned char*)port->readp; /* cast away volatile */
396 end = (unsigned char*)port->writep; /* cast away volatile */
397 /* 0123456789 0123456789
398 * ----- - -----
399 * ^rp ^wp ^wp ^rp
400 */
401
402 if (end >= start)
403 avail = end - start;
404 else
405 avail = port->in_buffer_size - (start - end);
406 return avail;
407 }
408
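/* Like sync_data_avail(), but counts only the bytes up to the physical end
 * of the flip buffer, so the caller can copy that amount without wrapping. */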
409 static inline int sync_data_avail_to_end(struct sync_port *port)
410 {
411 int avail;
412 unsigned char *start;
413 unsigned char *end;
414
415 start = (unsigned char*)port->readp; /* cast away volatile */
416 end = (unsigned char*)port->writep; /* cast away volatile */
417 /* 0123456789 0123456789
418 * ----- -----
419 * ^rp ^wp ^wp ^rp
420 */
421
422 if (end >= start)
423 avail = end - start;
424 else
425 avail = port->flip + port->in_buffer_size - start;
426 return avail;
427 }
428
429 static int sync_serial_open(struct inode *inode, struct file *file)
430 {
431 int dev = iminor(inode);
432 int ret = -EBUSY;
433 sync_port *port;
434 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
435 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
436
437 lock_kernel();
438 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
439
440 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
441 {
442 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
443 ret = -ENODEV;
444 goto out;
445 }
446 port = &ports[dev];
447 /* Allow opening this device twice (assuming one reader and one writer) */
448 if (port->busy == 2)
449 {
450 DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
451 goto out;
452 }
453
454
455 if (port->init_irqs) {
456 if (port->use_dma) {
457 if (port == &ports[0]) {
458 #ifdef SYNC_SER_DMA
459 if (request_irq(DMA_OUT_INTR_VECT,
460 tr_interrupt,
461 0,
462 "synchronous serial 0 dma tr",
463 &ports[0])) {
464 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
465 goto out;
466 } else if (request_irq(DMA_IN_INTR_VECT,
467 rx_interrupt,
468 0,
469 "synchronous serial 1 dma rx",
470 &ports[0])) {
471 free_irq(DMA_OUT_INTR_VECT, &port[0]);
472 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
473 goto out;
474 } else if (crisv32_request_dma(OUT_DMA_NBR,
475 "synchronous serial 0 dma tr",
476 DMA_VERBOSE_ON_ERROR,
477 0,
478 REQ_DMA_SYNCSER)) {
479 free_irq(DMA_OUT_INTR_VECT, &ports[0]);
480 free_irq(DMA_IN_INTR_VECT, &ports[0]);
481 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
482 goto out;
483 } else if (crisv32_request_dma(IN_DMA_NBR,
484 "synchronous serial 0 dma rec",
485 DMA_VERBOSE_ON_ERROR,
486 0,
487 REQ_DMA_SYNCSER)) {
488 crisv32_free_dma(OUT_DMA_NBR);
489 free_irq(DMA_OUT_INTR_VECT, &ports[0]);
490 free_irq(DMA_IN_INTR_VECT, &ports[0]);
491 printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
492 goto out;
493 }
494 #endif
495 }
496 #ifdef CONFIG_ETRAXFS
497 else if (port == &ports[1]) {
498 #ifdef SYNC_SER_DMA
499 if (request_irq(DMA6_INTR_VECT,
500 tr_interrupt,
501 0,
502 "synchronous serial 1 dma tr",
503 &ports[1])) {
504 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
505 goto out;
506 } else if (request_irq(DMA7_INTR_VECT,
507 rx_interrupt,
508 0,
509 "synchronous serial 1 dma rx",
510 &ports[1])) {
511 free_irq(DMA6_INTR_VECT, &ports[1]);
512 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
513 goto out;
514 } else if (crisv32_request_dma(
515 SYNC_SER1_TX_DMA_NBR,
516 "synchronous serial 1 dma tr",
517 DMA_VERBOSE_ON_ERROR,
518 0,
519 dma_sser1)) {
520 free_irq(DMA6_INTR_VECT, &ports[1]);
521 free_irq(DMA7_INTR_VECT, &ports[1]);
522 printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
523 goto out;
524 } else if (crisv32_request_dma(
525 SYNC_SER1_RX_DMA_NBR,
526 "synchronous serial 3 dma rec",
527 DMA_VERBOSE_ON_ERROR,
528 0,
529 dma_sser1)) {
530 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
531 free_irq(DMA6_INTR_VECT, &ports[1]);
532 free_irq(DMA7_INTR_VECT, &ports[1]);
533 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
534 goto out;
535 }
536 #endif
537 }
538 #endif
539 /* Enable DMAs */
540 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
541 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
542 /* Enable DMA IRQs */
543 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
544 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
545 /* Set up wordsize = 1 for DMAs. */
546 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
547 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
548
549 start_dma_in(port);
550 port->init_irqs = 0;
551 } else { /* !port->use_dma */
552 #ifdef SYNC_SER_MANUAL
553 if (port == &ports[0]) {
554 if (request_irq(SYNCSER_INTR_VECT,
555 manual_interrupt,
556 0,
557 "synchronous serial manual irq",
558 &ports[0])) {
559 printk("Can't allocate sync serial manual irq");
560 goto out;
561 }
562 }
563 #ifdef CONFIG_ETRAXFS
564 else if (port == &ports[1]) {
565 if (request_irq(SSER1_INTR_VECT,
566 manual_interrupt,
567 0,
568 "synchronous serial manual irq",
569 &ports[1])) {
570 printk(KERN_CRIT "Can't allocate sync serial manual irq");
571 goto out;
572 }
573 }
574 #endif
575 port->init_irqs = 0;
576 #else
577 panic("sync_serial: Manual mode not supported.\n");
578 #endif /* SYNC_SER_MANUAL */
579 }
580
581 } /* port->init_irqs */
582
583 port->busy++;
584 ret = 0;
585 out:
586 unlock_kernel();
587 return ret;
588 }
589
590 static int sync_serial_release(struct inode *inode, struct file *file)
591 {
592 int dev = iminor(inode);
593 sync_port *port;
594
595 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
596 {
597 DEBUG(printk("Invalid minor %d\n", dev));
598 return -ENODEV;
599 }
600 port = &ports[dev];
601 if (port->busy)
602 port->busy--;
603 if (!port->busy)
604 /* XXX */ ;
605 return 0;
606 }
607
608 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
609 {
610 int dev = iminor(file->f_path.dentry->d_inode);
611 unsigned int mask = 0;
612 sync_port *port;
613 DEBUGPOLL( static unsigned int prev_mask = 0; );
614
615 port = &ports[dev];
616
617 if (!port->started) {
618 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
619 reg_sser_rw_rec_cfg rec_cfg =
620 REG_RD(sser, port->regi_sser, rw_rec_cfg);
621 cfg.en = regk_sser_yes;
622 rec_cfg.rec_en = port->input;
623 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
624 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
625 port->started = 1;
626 }
627
628 poll_wait(file, &port->out_wait_q, wait);
629 poll_wait(file, &port->in_wait_q, wait);
630
631 /* No active transfer, descriptors are available */
632 if (port->output && !port->tr_running)
633 mask |= POLLOUT | POLLWRNORM;
634
635 /* Descriptor and buffer space available. */
636 if (port->output &&
637 port->active_tr_descr != port->catch_tr_descr &&
638 port->out_buf_count < OUT_BUFFER_SIZE)
639 mask |= POLLOUT | POLLWRNORM;
640
641 /* At least an inbufchunk of data */
642 if (port->input && sync_data_avail(port) >= port->inbufchunk)
643 mask |= POLLIN | POLLRDNORM;
644
645 DEBUGPOLL(if (mask != prev_mask)
646 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
647 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
648 prev_mask = mask;
649 );
650 return mask;
651 }
652
653 static int sync_serial_ioctl(struct inode *inode, struct file *file,
654 unsigned int cmd, unsigned long arg)
655 {
656 int return_val = 0;
657 int dma_w_size = regk_dma_set_w_size1;
658 int dev = iminor(file->f_path.dentry->d_inode);
659 sync_port *port;
660 reg_sser_rw_tr_cfg tr_cfg;
661 reg_sser_rw_rec_cfg rec_cfg;
662 reg_sser_rw_frm_cfg frm_cfg;
663 reg_sser_rw_cfg gen_cfg;
664 reg_sser_rw_intr_mask intr_mask;
665
666 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
667 {
668 DEBUG(printk("Invalid minor %d\n", dev));
669 return -1;
670 }
671 port = &ports[dev];
672 spin_lock_irq(&port->lock);
673
674 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
675 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
676 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
677 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
678 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
679
680 switch(cmd)
681 {
682 case SSP_SPEED:
683 if (GET_SPEED(arg) == CODEC)
684 {
685 unsigned int freq;
686
687 gen_cfg.base_freq = regk_sser_f32;
688
689 /* Clock divider will internally be
690 * gen_cfg.clk_div + 1.
691 */
692
693 freq = GET_FREQ(arg);
694 switch (freq) {
695 case FREQ_32kHz:
696 case FREQ_64kHz:
697 case FREQ_128kHz:
698 case FREQ_256kHz:
699 gen_cfg.clk_div = 125 *
700 (1 << (freq - FREQ_256kHz)) - 1;
701 break;
702 case FREQ_512kHz:
703 gen_cfg.clk_div = 62;
704 break;
705 case FREQ_1MHz:
706 case FREQ_2MHz:
707 case FREQ_4MHz:
708 gen_cfg.clk_div = 8 * (1 << freq) - 1;
709 break;
710 }
711 } else {
712 gen_cfg.base_freq = regk_sser_f29_493;
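/* Example: SSP115200 gives 29493000 / (115200 * 8) - 1 = 31, i.e. the
 * 29.493 MHz base clock divided by 32, roughly 8 times the nominal bit
 * rate. */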
713 switch (GET_SPEED(arg)) {
714 case SSP150:
715 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
716 break;
717 case SSP300:
718 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
719 break;
720 case SSP600:
721 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
722 break;
723 case SSP1200:
724 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
725 break;
726 case SSP2400:
727 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
728 break;
729 case SSP4800:
730 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
731 break;
732 case SSP9600:
733 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
734 break;
735 case SSP19200:
736 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
737 break;
738 case SSP28800:
739 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
740 break;
741 case SSP57600:
742 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
743 break;
744 case SSP115200:
745 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
746 break;
747 case SSP230400:
748 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
749 break;
750 case SSP460800:
751 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
752 break;
753 case SSP921600:
754 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
755 break;
756 case SSP3125000:
757 gen_cfg.base_freq = regk_sser_f100;
758 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
759 break;
760
761 }
762 }
763 frm_cfg.wordrate = GET_WORD_RATE(arg);
764
765 break;
766 case SSP_MODE:
767 switch(arg)
768 {
769 case MASTER_OUTPUT:
770 port->output = 1;
771 port->input = 0;
772 frm_cfg.out_on = regk_sser_tr;
773 frm_cfg.frame_pin_dir = regk_sser_out;
774 gen_cfg.clk_dir = regk_sser_out;
775 break;
776 case SLAVE_OUTPUT:
777 port->output = 1;
778 port->input = 0;
779 frm_cfg.frame_pin_dir = regk_sser_in;
780 gen_cfg.clk_dir = regk_sser_in;
781 break;
782 case MASTER_INPUT:
783 port->output = 0;
784 port->input = 1;
785 frm_cfg.frame_pin_dir = regk_sser_out;
786 frm_cfg.out_on = regk_sser_intern_tb;
787 gen_cfg.clk_dir = regk_sser_out;
788 break;
789 case SLAVE_INPUT:
790 port->output = 0;
791 port->input = 1;
792 frm_cfg.frame_pin_dir = regk_sser_in;
793 gen_cfg.clk_dir = regk_sser_in;
794 break;
795 case MASTER_BIDIR:
796 port->output = 1;
797 port->input = 1;
798 frm_cfg.frame_pin_dir = regk_sser_out;
799 frm_cfg.out_on = regk_sser_intern_tb;
800 gen_cfg.clk_dir = regk_sser_out;
801 break;
802 case SLAVE_BIDIR:
803 port->output = 1;
804 port->input = 1;
805 frm_cfg.frame_pin_dir = regk_sser_in;
806 gen_cfg.clk_dir = regk_sser_in;
807 break;
808 default:
809 spin_unlock_irq(&port->lock);
810 return -EINVAL;
811 }
812 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
813 intr_mask.rdav = regk_sser_yes;
814 break;
815 case SSP_FRAME_SYNC:
816 if (arg & NORMAL_SYNC) {
817 frm_cfg.rec_delay = 1;
818 frm_cfg.tr_delay = 1;
819 }
820 else if (arg & EARLY_SYNC)
821 frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
822 else if (arg & SECOND_WORD_SYNC) {
823 frm_cfg.rec_delay = 7;
824 frm_cfg.tr_delay = 1;
825 }
826
827 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
828 frm_cfg.early_wend = regk_sser_yes;
829 if (arg & BIT_SYNC)
830 frm_cfg.type = regk_sser_edge;
831 else if (arg & WORD_SYNC)
832 frm_cfg.type = regk_sser_level;
833 else if (arg & EXTENDED_SYNC)
834 frm_cfg.early_wend = regk_sser_no;
835
836 if (arg & SYNC_ON)
837 frm_cfg.frame_pin_use = regk_sser_frm;
838 else if (arg & SYNC_OFF)
839 frm_cfg.frame_pin_use = regk_sser_gio0;
840
841 dma_w_size = regk_dma_set_w_size2;
842 if (arg & WORD_SIZE_8) {
843 rec_cfg.sample_size = tr_cfg.sample_size = 7;
844 dma_w_size = regk_dma_set_w_size1;
845 } else if (arg & WORD_SIZE_12)
846 rec_cfg.sample_size = tr_cfg.sample_size = 11;
847 else if (arg & WORD_SIZE_16)
848 rec_cfg.sample_size = tr_cfg.sample_size = 15;
849 else if (arg & WORD_SIZE_24)
850 rec_cfg.sample_size = tr_cfg.sample_size = 23;
851 else if (arg & WORD_SIZE_32)
852 rec_cfg.sample_size = tr_cfg.sample_size = 31;
853
854 if (arg & BIT_ORDER_MSB)
855 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
856 else if (arg & BIT_ORDER_LSB)
857 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
858
859 if (arg & FLOW_CONTROL_ENABLE) {
860 frm_cfg.status_pin_use = regk_sser_frm;
861 rec_cfg.fifo_thr = regk_sser_thr16;
862 } else if (arg & FLOW_CONTROL_DISABLE) {
863 frm_cfg.status_pin_use = regk_sser_gio0;
864 rec_cfg.fifo_thr = regk_sser_inf;
865 }
866
867 if (arg & CLOCK_NOT_GATED)
868 gen_cfg.gate_clk = regk_sser_no;
869 else if (arg & CLOCK_GATED)
870 gen_cfg.gate_clk = regk_sser_yes;
871
872 break;
873 case SSP_IPOLARITY:
874 /* NOTE!! negedge is considered NORMAL */
875 if (arg & CLOCK_NORMAL)
876 rec_cfg.clk_pol = regk_sser_neg;
877 else if (arg & CLOCK_INVERT)
878 rec_cfg.clk_pol = regk_sser_pos;
879
880 if (arg & FRAME_NORMAL)
881 frm_cfg.level = regk_sser_pos_hi;
882 else if (arg & FRAME_INVERT)
883 frm_cfg.level = regk_sser_neg_lo;
884
885 if (arg & STATUS_NORMAL)
886 gen_cfg.hold_pol = regk_sser_pos;
887 else if (arg & STATUS_INVERT)
888 gen_cfg.hold_pol = regk_sser_neg;
889 break;
890 case SSP_OPOLARITY:
891 if (arg & CLOCK_NORMAL)
892 gen_cfg.out_clk_pol = regk_sser_pos;
893 else if (arg & CLOCK_INVERT)
894 gen_cfg.out_clk_pol = regk_sser_neg;
895
896 if (arg & FRAME_NORMAL)
897 frm_cfg.level = regk_sser_pos_hi;
898 else if (arg & FRAME_INVERT)
899 frm_cfg.level = regk_sser_neg_lo;
900
901 if (arg & STATUS_NORMAL)
902 gen_cfg.hold_pol = regk_sser_pos;
903 else if (arg & STATUS_INVERT)
904 gen_cfg.hold_pol = regk_sser_neg;
905 break;
906 case SSP_SPI:
907 rec_cfg.fifo_thr = regk_sser_inf;
908 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
909 rec_cfg.sample_size = tr_cfg.sample_size = 7;
910 frm_cfg.frame_pin_use = regk_sser_frm;
911 frm_cfg.type = regk_sser_level;
912 frm_cfg.tr_delay = 1;
913 frm_cfg.level = regk_sser_neg_lo;
914 if (arg & SPI_SLAVE)
915 {
916 rec_cfg.clk_pol = regk_sser_neg;
917 gen_cfg.clk_dir = regk_sser_in;
918 port->input = 1;
919 port->output = 0;
920 }
921 else
922 {
923 gen_cfg.out_clk_pol = regk_sser_pos;
924 port->input = 0;
925 port->output = 1;
926 gen_cfg.clk_dir = regk_sser_out;
927 }
928 break;
929 case SSP_INBUFCHUNK:
930 break;
931 default:
932 return_val = -1;
933 }
934
935
936 if (port->started) {
937 rec_cfg.rec_en = port->input;
938 gen_cfg.en = (port->output | port->input);
939 }
940
941 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
942 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
943 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
944 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
945 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
946
947
948 if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
949 WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
950 int en = gen_cfg.en;
951 gen_cfg.en = 0;
952 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
953 /* ##### Should DMA be stopped before we change dma size? */
954 DMA_WR_CMD(port->regi_dmain, dma_w_size);
955 DMA_WR_CMD(port->regi_dmaout, dma_w_size);
956 gen_cfg.en = en;
957 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
958 }
959
960 spin_unlock_irq(&port->lock);
961 return return_val;
962 }
963
964 /* NOTE: sync_serial_write does not support concurrency */
965 static ssize_t sync_serial_write(struct file *file, const char *buf,
966 size_t count, loff_t *ppos)
967 {
968 int dev = iminor(file->f_path.dentry->d_inode);
969 DECLARE_WAITQUEUE(wait, current);
970 struct sync_port *port;
971 int trunc_count;
972 unsigned long flags;
973 int bytes_free;
974 int out_buf_count;
975
976 unsigned char *rd_ptr; /* First allocated byte in the buffer */
977 unsigned char *wr_ptr; /* First free byte in the buffer */
978 unsigned char *buf_stop_ptr; /* Last byte + 1 */
979
980 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
981 DEBUG(printk("Invalid minor %d\n", dev));
982 return -ENODEV;
983 }
984 port = &ports[dev];
985
986 /* |<- OUT_BUFFER_SIZE ->|
987 * |<- out_buf_count ->|
988 * |<- trunc_count ->| ...->|
989 * ______________________________________________________
990 * | free | data | free |
991 * |_________|___________________|________________________|
992 * ^ rd_ptr ^ wr_ptr
993 */
994 DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
995 port->port_nbr, count, port->active_tr_descr,
996 port->catch_tr_descr));
997
998 /* Read variables that may be updated by interrupts */
999 spin_lock_irqsave(&port->lock, flags);
1000 rd_ptr = port->out_rd_ptr;
1001 out_buf_count = port->out_buf_count;
1002 spin_unlock_irqrestore(&port->lock, flags);
1003
1004 /* Check if resources are available */
1005 if (port->tr_running &&
1006 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1007 out_buf_count >= OUT_BUFFER_SIZE)) {
1008 DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
1009 return -EAGAIN;
1010 }
1011
1012 buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
1013
1014 /* Determine pointer to the first free byte, before copying. */
1015 wr_ptr = rd_ptr + out_buf_count;
1016 if (wr_ptr >= buf_stop_ptr)
1017 wr_ptr -= OUT_BUFFER_SIZE;
1018
1019 /* If we wrap the ring buffer, let the user space program handle it by
1020 * truncating the data. This could be more elegant; small buffer
1021 * fragments may occur.
1022 */
1023 bytes_free = OUT_BUFFER_SIZE - out_buf_count;
1024 if (wr_ptr + bytes_free > buf_stop_ptr)
1025 bytes_free = buf_stop_ptr - wr_ptr;
1026 trunc_count = (count < bytes_free) ? count : bytes_free;
1027
1028 if (copy_from_user(wr_ptr, buf, trunc_count))
1029 return -EFAULT;
1030
1031 DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
1032 out_buf_count, trunc_count,
1033 port->out_buf_count, port->out_buffer,
1034 wr_ptr, buf_stop_ptr));
1035
1036 /* Make sure transmitter/receiver is running */
1037 if (!port->started) {
1038 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1039 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1040 cfg.en = regk_sser_yes;
1041 rec_cfg.rec_en = port->input;
1042 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1043 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1044 port->started = 1;
1045 }
1046
1047 /* Setup wait if blocking */
1048 if (!(file->f_flags & O_NONBLOCK)) {
1049 add_wait_queue(&port->out_wait_q, &wait);
1050 set_current_state(TASK_INTERRUPTIBLE);
1051 }
1052
1053 spin_lock_irqsave(&port->lock, flags);
1054 port->out_buf_count += trunc_count;
1055 if (port->use_dma) {
1056 start_dma_out(port, wr_ptr, trunc_count);
1057 } else if (!port->tr_running) {
1058 reg_sser_rw_intr_mask intr_mask;
1059 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1060 /* Start sender by writing data */
1061 send_word(port);
1062 /* and enable transmitter ready IRQ */
1063 intr_mask.trdy = 1;
1064 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1065 }
1066 spin_unlock_irqrestore(&port->lock, flags);
1067
1068 /* Exit if non blocking */
1069 if (file->f_flags & O_NONBLOCK) {
1070 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
1071 port->port_nbr, trunc_count,
1072 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1073 return trunc_count;
1074 }
1075
1076 schedule();
1077 set_current_state(TASK_RUNNING);
1078 remove_wait_queue(&port->out_wait_q, &wait);
1079
1080 if (signal_pending(current))
1081 return -EINTR;
1082
1083 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
1084 port->port_nbr, trunc_count));
1085 return trunc_count;
1086 }
1087
1088 static ssize_t sync_serial_read(struct file * file, char * buf,
1089 size_t count, loff_t *ppos)
1090 {
1091 int dev = iminor(file->f_path.dentry->d_inode);
1092 int avail;
1093 sync_port *port;
1094 unsigned char* start;
1095 unsigned char* end;
1096 unsigned long flags;
1097
1098 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
1099 {
1100 DEBUG(printk("Invalid minor %d\n", dev));
1101 return -ENODEV;
1102 }
1103 port = &ports[dev];
1104
1105 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
1106
1107 if (!port->started)
1108 {
1109 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1110 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1111 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1112 cfg.en = regk_sser_yes;
1113 tr_cfg.tr_en = regk_sser_yes;
1114 rec_cfg.rec_en = regk_sser_yes;
1115 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1116 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1117 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1118 port->started = 1;
1119 }
1120
1121 /* Calculate number of available bytes */
1122 /* Save pointers to avoid that they are modified by interrupt */
1123 spin_lock_irqsave(&port->lock, flags);
1124 start = (unsigned char*)port->readp; /* cast away volatile */
1125 end = (unsigned char*)port->writep; /* cast away volatile */
1126 spin_unlock_irqrestore(&port->lock, flags);
1127 while ((start == end) && !port->full) /* No data */
1128 {
1129 DEBUGREAD(printk(KERN_DEBUG "&"));
1130 if (file->f_flags & O_NONBLOCK)
1131 return -EAGAIN;
1132
1133 interruptible_sleep_on(&port->in_wait_q);
1134 if (signal_pending(current))
1135 return -EINTR;
1136
1137 spin_lock_irqsave(&port->lock, flags);
1138 start = (unsigned char*)port->readp; /* cast away volatile */
1139 end = (unsigned char*)port->writep; /* cast away volatile */
1140 spin_unlock_irqrestore(&port->lock, flags);
1141 }
1142
1143 /* Lazy read, never return wrapped data. */
1144 if (port->full)
1145 avail = port->in_buffer_size;
1146 else if (end > start)
1147 avail = end - start;
1148 else
1149 avail = port->flip + port->in_buffer_size - start;
1150
1151 count = count > avail ? avail : count;
1152 if (copy_to_user(buf, start, count))
1153 return -EFAULT;
1154 /* Disable interrupts while updating readp */
1155 spin_lock_irqsave(&port->lock, flags);
1156 port->readp += count;
1157 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1158 port->readp = port->flip;
1159 port->full = 0;
1160 spin_unlock_irqrestore(&port->lock, flags);
1161 DEBUGREAD(printk("r %d\n", count));
1162 return count;
1163 }
1164
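/* Feed one word from out_buffer to the transmitter data register.  Sample
 * sizes wider than 16 bits are written 16 bits at a time (plus a trailing
 * byte for the 24-bit case); out_buf_count is decreased and out_rd_ptr wraps
 * at the end of out_buffer. */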
1165 static void send_word(sync_port* port)
1166 {
1167 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1168 reg_sser_rw_tr_data tr_data = {0};
1169
1170 switch(tr_cfg.sample_size)
1171 {
1172 case 8:
1173 port->out_buf_count--;
1174 tr_data.data = *port->out_rd_ptr++;
1175 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1176 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1177 port->out_rd_ptr = port->out_buffer;
1178 break;
1179 case 12:
1180 {
1181 int data = (*port->out_rd_ptr++) << 8;
1182 data |= *port->out_rd_ptr++;
1183 port->out_buf_count -= 2;
1184 tr_data.data = data;
1185 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1186 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1187 port->out_rd_ptr = port->out_buffer;
1188 }
1189 break;
1190 case 16:
1191 port->out_buf_count -= 2;
1192 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1193 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1194 port->out_rd_ptr += 2;
1195 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1196 port->out_rd_ptr = port->out_buffer;
1197 break;
1198 case 24:
1199 port->out_buf_count -= 3;
1200 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1201 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1202 port->out_rd_ptr += 2;
1203 tr_data.data = *port->out_rd_ptr++;
1204 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1205 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1206 port->out_rd_ptr = port->out_buffer;
1207 break;
1208 case 32:
1209 port->out_buf_count -= 4;
1210 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1211 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1212 port->out_rd_ptr += 2;
1213 tr_data.data = *(unsigned short *)port->out_rd_ptr;
1214 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1215 port->out_rd_ptr += 2;
1216 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1217 port->out_rd_ptr = port->out_buffer;
1218 break;
1219 }
1220 }
1221
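/* Queue 'count' bytes starting at 'data' (inside out_buffer) on the transmit
 * descriptor ring: the active descriptor becomes the new end-of-list and the
 * output DMA is started, or told to continue if already running.  Called
 * from sync_serial_write() with port->lock held. */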
1222 static void start_dma_out(struct sync_port *port,
1223 const char *data, int count)
1224 {
1225 port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
1226 port->active_tr_descr->after = port->active_tr_descr->buf + count;
1227 port->active_tr_descr->intr = 1;
1228
1229 port->active_tr_descr->eol = 1;
1230 port->prev_tr_descr->eol = 0;
1231
1232 DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
1233 port->prev_tr_descr, port->active_tr_descr));
1234 port->prev_tr_descr = port->active_tr_descr;
1235 port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
1236
1237 if (!port->tr_running) {
1238 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1239 rw_tr_cfg);
1240
1241 port->out_context.next = 0;
1242 port->out_context.saved_data =
1243 (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1244 port->out_context.saved_data_buf = port->prev_tr_descr->buf;
1245
1246 DMA_START_CONTEXT(port->regi_dmaout,
1247 virt_to_phys((char *)&port->out_context));
1248
1249 tr_cfg.tr_en = regk_sser_yes;
1250 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1251 DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
1252 } else {
1253 DMA_CONTINUE_DATA(port->regi_dmaout);
1254 DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
1255 }
1256
1257 port->tr_running = 1;
1258 }
1259
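/* Build the receive descriptor ring: NBR_IN_DESCR descriptors, each covering
 * inbufchunk bytes of in_buffer and linked back to the first, then start the
 * input DMA context. */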
1260 static void start_dma_in(sync_port *port)
1261 {
1262 int i;
1263 char *buf;
1264 port->writep = port->flip;
1265
1266 if (port->writep > port->flip + port->in_buffer_size) {
1267 panic("Offset too large in sync serial driver\n");
1268 return;
1269 }
1270 buf = (char*)virt_to_phys(port->in_buffer);
1271 for (i = 0; i < NBR_IN_DESCR; i++) {
1272 port->in_descr[i].buf = buf;
1273 port->in_descr[i].after = buf + port->inbufchunk;
1274 port->in_descr[i].intr = 1;
1275 port->in_descr[i].next =
1276 (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
1277 buf += port->inbufchunk;
1278 }
1279 /* Link the last descriptor to the first */
1280 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1281 port->in_descr[i-1].eol = 1;
1282 port->next_rx_desc = &port->in_descr[0];
1283 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1284 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1285 port->in_context.saved_data_buf = port->in_descr[0].buf;
1286 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1287 }
1288
1289 #ifdef SYNC_SER_DMA
1290 static irqreturn_t tr_interrupt(int irq, void *dev_id)
1291 {
1292 reg_dma_r_masked_intr masked;
1293 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1294 reg_dma_rw_stat stat;
1295 int i;
1296 int found = 0;
1297 int stop_sser = 0;
1298
1299 for (i = 0; i < NBR_PORTS; i++) {
1300 sync_port *port = &ports[i];
1301 if (!port->enabled || !port->use_dma)
1302 continue;
1303
1304 /* IRQ active for the port? */
1305 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1306 if (!masked.data)
1307 continue;
1308
1309 found = 1;
1310
1311 /* Check if we should stop the DMA transfer */
1312 stat = REG_RD(dma, port->regi_dmaout, rw_stat);
1313 if (stat.list_state == regk_dma_data_at_eol)
1314 stop_sser = 1;
1315
1316 /* Clear IRQ */
1317 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1318
1319 if (!stop_sser) {
1320 /* The DMA has completed a descriptor, EOL was not
1321 * encountered, so step relevant descriptor and
1322 * datapointers forward. */
1323 int sent;
1324 sent = port->catch_tr_descr->after -
1325 port->catch_tr_descr->buf;
1326 DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
1327 "in descr %p (ac: %p)\n",
1328 port->out_buf_count, sent,
1329 port->out_buf_count - sent,
1330 port->catch_tr_descr,
1331 port->active_tr_descr););
1332 port->out_buf_count -= sent;
1333 port->catch_tr_descr =
1334 phys_to_virt((int) port->catch_tr_descr->next);
1335 port->out_rd_ptr =
1336 phys_to_virt((int) port->catch_tr_descr->buf);
1337 } else {
1338 int i, sent;
1339 /* EOL handler.
1340 * Note that if an EOL was encountered during the irq
1341 * locked section of sync_ser_write the DMA will be
1342 * restarted and the eol flag will be cleared.
1343 * The remaining descriptors will be traversed by
1344 * the descriptor interrupts as usual.
1345 */
1346 i = 0;
1347 while (!port->catch_tr_descr->eol) {
1348 sent = port->catch_tr_descr->after -
1349 port->catch_tr_descr->buf;
1350 DEBUGOUTBUF(printk(KERN_DEBUG
1351 "traversing descr %p -%d (%d)\n",
1352 port->catch_tr_descr,
1353 sent,
1354 port->out_buf_count));
1355 port->out_buf_count -= sent;
1356 port->catch_tr_descr = phys_to_virt(
1357 (int)port->catch_tr_descr->next);
1358 i++;
1359 if (i >= NBR_OUT_DESCR) {
1360 /* TODO: Reset and recover */
1361 panic("sync_serial: missing eol");
1362 }
1363 }
1364 sent = port->catch_tr_descr->after -
1365 port->catch_tr_descr->buf;
1366 DEBUGOUTBUF(printk(KERN_DEBUG
1367 "eol at descr %p -%d (%d)\n",
1368 port->catch_tr_descr,
1369 sent,
1370 port->out_buf_count));
1371
1372 port->out_buf_count -= sent;
1373
1374 /* Update read pointer to first free byte, we
1375 * may already be writing data there. */
1376 port->out_rd_ptr =
1377 phys_to_virt((int) port->catch_tr_descr->after);
1378 if (port->out_rd_ptr > port->out_buffer +
1379 OUT_BUFFER_SIZE)
1380 port->out_rd_ptr = port->out_buffer;
1381
1382 reg_sser_rw_tr_cfg tr_cfg =
1383 REG_RD(sser, port->regi_sser, rw_tr_cfg);
1384 DEBUGTXINT(printk(KERN_DEBUG
1385 "tr_int DMA stop %d, set catch @ %p\n",
1386 port->out_buf_count,
1387 port->active_tr_descr));
1388 if (port->out_buf_count != 0)
1389 printk(KERN_CRIT "sync_ser: buffer not "
1390 "empty after eol.\n");
1391 port->catch_tr_descr = port->active_tr_descr;
1392 port->tr_running = 0;
1393 tr_cfg.tr_en = regk_sser_no;
1394 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1395 }
1396 /* wake up the waiting process */
1397 wake_up_interruptible(&port->out_wait_q);
1398 }
1399 return IRQ_RETVAL(found);
1400 } /* tr_interrupt */
1401
1402 static irqreturn_t rx_interrupt(int irq, void *dev_id)
1403 {
1404 reg_dma_r_masked_intr masked;
1405 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1406
1407 int i;
1408 int found = 0;
1409
1410 for (i = 0; i < NBR_PORTS; i++)
1411 {
1412 sync_port *port = &ports[i];
1413
1414 if (!port->enabled || !port->use_dma )
1415 continue;
1416
1417 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1418
1419 if (masked.data) /* Descriptor interrupt */
1420 {
1421 found = 1;
1422 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1423 virt_to_phys(port->next_rx_desc)) {
1424 DEBUGRXINT(printk(KERN_DEBUG "!"));
1425 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1426 int first_size = port->flip + port->in_buffer_size - port->writep;
1427 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1428 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1429 port->writep = port->flip + port->inbufchunk - first_size;
1430 } else {
1431 memcpy((char*)port->writep,
1432 phys_to_virt((unsigned)port->next_rx_desc->buf),
1433 port->inbufchunk);
1434 port->writep += port->inbufchunk;
1435 if (port->writep >= port->flip + port->in_buffer_size)
1436 port->writep = port->flip;
1437 }
1438 if (port->writep == port->readp)
1439 {
1440 port->full = 1;
1441 }
1442
1443 port->next_rx_desc->eol = 1;
1444 port->prev_rx_desc->eol = 0;
1445 /* Cache bug workaround */
1446 flush_dma_descr(port->prev_rx_desc, 0);
1447 port->prev_rx_desc = port->next_rx_desc;
1448 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1449 /* Cache bug workaround */
1450 flush_dma_descr(port->prev_rx_desc, 1);
1451 /* wake up the waiting process */
1452 wake_up_interruptible(&port->in_wait_q);
1453 DMA_CONTINUE(port->regi_dmain);
1454 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1455
1456 }
1457 }
1458 }
1459 return IRQ_RETVAL(found);
1460 } /* rx_interrupt */
1461 #endif /* SYNC_SER_DMA */
1462
1463 #ifdef SYNC_SER_MANUAL
1464 static irqreturn_t manual_interrupt(int irq, void *dev_id)
1465 {
1466 int i;
1467 int found = 0;
1468 reg_sser_r_masked_intr masked;
1469
1470 for (i = 0; i < NBR_PORTS; i++)
1471 {
1472 sync_port *port = &ports[i];
1473
1474 if (!port->enabled || port->use_dma)
1475 {
1476 continue;
1477 }
1478
1479 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1480 if (masked.rdav) /* Data received? */
1481 {
1482 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1483 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
1484 found = 1;
1485 /* Read data */
1486 switch(rec_cfg.sample_size)
1487 {
1488 case 8:
1489 *port->writep++ = data.data & 0xff;
1490 break;
1491 case 12:
1492 *port->writep = (data.data & 0x0ff0) >> 4;
1493 *(port->writep + 1) = data.data & 0x0f;
1494 port->writep+=2;
1495 break;
1496 case 16:
1497 *(unsigned short*)port->writep = data.data;
1498 port->writep+=2;
1499 break;
1500 case 24:
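/* Note: a full 32-bit store for a 24-bit sample; writep only
 * advances by three bytes (CRIS is little-endian, so the low
 * 24 bits land first). */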
1501 *(unsigned int*)port->writep = data.data;
1502 port->writep+=3;
1503 break;
1504 case 32:
1505 *(unsigned int*)port->writep = data.data;
1506 port->writep+=4;
1507 break;
1508 }
1509
1510 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
1511 port->writep = port->flip;
1512 if (port->writep == port->readp) {
1513 /* receive buffer overrun, discard oldest data
1514 */
1515 port->readp++;
1516 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1517 port->readp = port->flip;
1518 }
1519 if (sync_data_avail(port) >= port->inbufchunk)
1520 wake_up_interruptible(&port->in_wait_q); /* Wake up application */
1521 }
1522
1523 if (masked.trdy) /* Transmitter ready? */
1524 {
1525 found = 1;
1526 if (port->out_buf_count > 0) /* More data to send */
1527 send_word(port);
1528 else /* transmission finished */
1529 {
1530 reg_sser_rw_intr_mask intr_mask;
1531 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1532 intr_mask.trdy = 0;
1533 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1534 wake_up_interruptible(&port->out_wait_q); /* Wake up application */
1535 }
1536 }
1537 }
1538 return IRQ_RETVAL(found);
1539 }
1540 #endif
1541
1542 module_init(etrax_sync_serial_init);