/*
 * linux/drivers/misc/xillybus_core.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework.
 *
 * This driver interfaces with a special IP core in an FPGA, setting up
 * a pipe between a hardware FIFO in the programmable logic and a device
 * file in the host. The number of such pipes and their attributes are
 * set up on the logic. This driver detects these automatically and
 * creates the device files accordingly.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"

MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_VERSION("1.07");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");

/* General timeout is 100 ms, rx timeout is 10 ms */
#define XILLY_RX_TIMEOUT (10*HZ/1000)
#define XILLY_TIMEOUT (100*HZ/1000)

#define fpga_msg_ctrl_reg 0x0002
#define fpga_dma_control_reg 0x0008
#define fpga_dma_bufno_reg 0x0009
#define fpga_dma_bufaddr_lowaddr_reg 0x000a
#define fpga_dma_bufaddr_highaddr_reg 0x000b
#define fpga_buf_ctrl_reg 0x000c
#define fpga_buf_offset_reg 0x000d
#define fpga_endian_reg 0x0010

#define XILLYMSG_OPCODE_RELEASEBUF 1
#define XILLYMSG_OPCODE_QUIESCEACK 2
#define XILLYMSG_OPCODE_FIFOEOF 3
#define XILLYMSG_OPCODE_FATAL_ERROR 4
#define XILLYMSG_OPCODE_NONEMPTY 5

static const char xillyname[] = "xillybus";

static struct class *xillybus_class;

/*
 * ep_list_lock is the last lock to be taken; No other lock requests are
 * allowed while holding it. It merely protects list_of_endpoints, and not
 * the endpoints listed in it.
 */

static LIST_HEAD(list_of_endpoints);
static struct mutex ep_list_lock;
static struct workqueue_struct *xillybus_wq;

/*
 * Locking scheme: Mutexes protect invocations of character device methods.
 * If both locks are taken, wr_mutex is taken first, rd_mutex second.
 *
 * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
 * buffers' end_offset fields against changes made by the IRQ handler (and
 * in theory, other file request handlers, but the mutex handles that).
 * Nothing else. They are held for short direct memory manipulations.
 * Needless to say, no mutex locking is allowed when a spinlock is held.
 *
 * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
 *
 * register_mutex is endpoint-specific, and is held when non-atomic
 * register operations are performed. wr_mutex and rd_mutex may be
 * held when register_mutex is taken, but none of the spinlocks. Note that
 * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
 * which are unrelated to buf_offset_reg, since they are harmless.
 *
 * Blocking on the wait queues is allowed with mutexes held, but not with
 * spinlocks.
 *
 * Only interruptible blocking is allowed on mutexes and wait queues.
 *
 * All in all, the locking order goes (with skips allowed, of course):
 * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
 */

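/*
 * Illustrative sketch only: a hypothetical helper (never called anywhere)
 * showing how a method honoring the ordering above would nest its locks.
 * The fields are the real ones from struct xilly_channel and struct
 * xilly_endpoint; the function itself is not part of the driver's flow.
 */
static int __maybe_unused xilly_lock_order_example(
	struct xilly_channel *channel)
{
	unsigned long flags;

	if (mutex_lock_interruptible(&channel->wr_mutex)) /* 1st */
		return -EINTR;
	if (mutex_lock_interruptible(&channel->rd_mutex)) { /* 2nd */
		mutex_unlock(&channel->wr_mutex);
		return -EINTR;
	}
	mutex_lock(&channel->endpoint->register_mutex); /* 3rd */

	/* Spinlocks come last; no sleeping while one is held */
	spin_lock_irqsave(&channel->wr_spinlock, flags);
	spin_unlock_irqrestore(&channel->wr_spinlock, flags);

	mutex_unlock(&channel->endpoint->register_mutex);
	mutex_unlock(&channel->rd_mutex);
	mutex_unlock(&channel->wr_mutex);
	return 0;
}
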
static void malformed_message(u32 *buf)
{
	int opcode;
	int msg_channel, msg_bufno, msg_data, msg_dir;

	opcode = (buf[0] >> 24) & 0xff;
	msg_dir = buf[0] & 1;
	msg_channel = (buf[0] >> 1) & 0x7ff;
	msg_bufno = (buf[0] >> 12) & 0x3ff;
	msg_data = buf[1] & 0xfffffff;

	pr_warn("xillybus: Malformed message (skipping): "
		"opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
		opcode, msg_channel, msg_dir, msg_bufno, msg_data);
}

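/*
 * For reference, the two-word message layout implied by the parsing above
 * and in xillybus_isr() (reconstructed from the shifts and masks in this
 * file; the hardware spec itself isn't quoted here):
 *
 *   buf[0]: bit  0       direction (1 = write channel)
 *           bits 11:1    channel number
 *           bits 21:12   buffer number
 *           bit  22      "last message" flag
 *           bits 31:24   opcode (one of XILLYMSG_OPCODE_*)
 *   buf[1]: bits 27:0    data (meaning depends on the opcode)
 *           bits 31:28   counter, matched against ep->msg_counter
 */
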
/*
 * xillybus_isr assumes the interrupt is allocated exclusively to it,
 * which is the natural case with MSI and several other hardware-oriented
 * interrupts. Sharing is not allowed.
 */

irqreturn_t xillybus_isr(int irq, void *data)
{
	struct xilly_endpoint *ep = data;
	u32 *buf;
	unsigned int buf_size;
	int i;
	int opcode;
	unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
	struct xilly_channel *channel;

	/*
	 * The endpoint structure is altered during periods when it's
	 * guaranteed no interrupt will occur, but in theory, the cache
	 * lines may not be updated. So a memory barrier is issued.
	 */

	smp_rmb();

	buf = ep->msgbuf_addr;
	buf_size = ep->msg_buf_size/sizeof(u32);

	ep->ephw->hw_sync_sgl_for_cpu(ep,
				      ep->msgbuf_dma_addr,
				      ep->msg_buf_size,
				      DMA_FROM_DEVICE);

	for (i = 0; i < buf_size; i += 2)
		if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
			malformed_message(&buf[i]);
			pr_warn("xillybus: Sending a NACK on "
				"counter %x (instead of %x) on entry %d\n",
				((buf[i+1] >> 28) & 0xf),
				ep->msg_counter,
				i/2);

			if (++ep->failed_messages > 10)
				pr_err("xillybus: Lost sync with "
				       "interrupt messages. Stopping.\n");
			else {
				ep->ephw->hw_sync_sgl_for_device(
					ep,
					ep->msgbuf_dma_addr,
					ep->msg_buf_size,
					DMA_FROM_DEVICE);

				iowrite32(0x01, /* Message NACK */
					  &ep->registers[fpga_msg_ctrl_reg]);
			}
			return IRQ_HANDLED;
		} else if (buf[i] & (1 << 22)) /* Last message */
			break;

	if (i >= buf_size) {
		pr_err("xillybus: Bad interrupt message. Stopping.\n");
		return IRQ_HANDLED;
	}

	buf_size = i;

	for (i = 0; i <= buf_size; i += 2) { /* Scan through messages */
		opcode = (buf[i] >> 24) & 0xff;

		msg_dir = buf[i] & 1;
		msg_channel = (buf[i] >> 1) & 0x7ff;
		msg_bufno = (buf[i] >> 12) & 0x3ff;
		msg_data = buf[i+1] & 0xfffffff;

		switch (opcode) {
		case XILLYMSG_OPCODE_RELEASEBUF:

			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0)) {
				malformed_message(&buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_dir) { /* Write channel */
				if (msg_bufno >= channel->num_wr_buffers) {
					malformed_message(&buf[i]);
					break;
				}
				spin_lock(&channel->wr_spinlock);
				channel->wr_buffers[msg_bufno]->end_offset =
					msg_data;
				channel->wr_fpga_buf_idx = msg_bufno;
				channel->wr_empty = 0;
				channel->wr_sleepy = 0;
				spin_unlock(&channel->wr_spinlock);

				wake_up_interruptible(&channel->wr_wait);

			} else {
				/* Read channel */

				if (msg_bufno >= channel->num_rd_buffers) {
					malformed_message(&buf[i]);
					break;
				}

				spin_lock(&channel->rd_spinlock);
				channel->rd_fpga_buf_idx = msg_bufno;
				channel->rd_full = 0;
				spin_unlock(&channel->rd_spinlock);

				wake_up_interruptible(&channel->rd_wait);
				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);
			}

			break;
		case XILLYMSG_OPCODE_NONEMPTY:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->wr_supports_nonempty) {
				malformed_message(&buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_bufno >= channel->num_wr_buffers) {
				malformed_message(&buf[i]);
				break;
			}
			spin_lock(&channel->wr_spinlock);
			if (msg_bufno == channel->wr_host_buf_idx)
				channel->wr_ready = 1;
			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_ready_wait);

			break;
		case XILLYMSG_OPCODE_QUIESCEACK:
			ep->idtlen = msg_data;
			wake_up_interruptible(&ep->ep_wait);

			break;
		case XILLYMSG_OPCODE_FIFOEOF:
			channel = ep->channels[msg_channel];
			spin_lock(&channel->wr_spinlock);
			channel->wr_eof = msg_bufno;
			channel->wr_sleepy = 0;

			channel->wr_hangup = channel->wr_empty &&
				(channel->wr_host_buf_idx == msg_bufno);

			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_wait);

			break;
		case XILLYMSG_OPCODE_FATAL_ERROR:
			ep->fatal_error = 1;
			wake_up_interruptible(&ep->ep_wait); /* For select() */
			pr_err("xillybus: FPGA reported a fatal "
			       "error. This means that the low-level "
			       "communication with the device has failed. "
			       "This hardware problem is most likely "
			       "unrelated to xillybus (neither kernel "
			       "module nor FPGA core), but reports are "
			       "still welcome. All I/O is aborted.\n");
			break;
		default:
			malformed_message(&buf[i]);
			break;
		}
	}

	ep->ephw->hw_sync_sgl_for_device(ep,
					 ep->msgbuf_dma_addr,
					 ep->msg_buf_size,
					 DMA_FROM_DEVICE);

	ep->msg_counter = (ep->msg_counter + 1) & 0xf;
	ep->failed_messages = 0;
	iowrite32(0x03, &ep->registers[fpga_msg_ctrl_reg]); /* Message ACK */

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(xillybus_isr);

/*
 * A few trivial memory management functions.
 * NOTE: These functions are used only on probe and remove, and therefore
 * no locks are applied!
 */

void xillybus_do_cleanup(struct xilly_cleanup *mem,
			 struct xilly_endpoint *endpoint)
{
	struct list_head *this, *next;

	list_for_each_safe(this, next, &mem->to_unmap) {
		struct xilly_dma *entry =
			list_entry(this, struct xilly_dma, node);

		endpoint->ephw->unmap_single(entry);
		kfree(entry);
	}

	INIT_LIST_HEAD(&mem->to_unmap);

	list_for_each_safe(this, next, &mem->to_kfree)
		kfree(this);

	INIT_LIST_HEAD(&mem->to_kfree);

	list_for_each_safe(this, next, &mem->to_pagefree) {
		struct xilly_page *entry =
			list_entry(this, struct xilly_page, node);

		free_pages(entry->addr, entry->order);
		kfree(entry);
	}
	INIT_LIST_HEAD(&mem->to_pagefree);
}
EXPORT_SYMBOL(xillybus_do_cleanup);

static void *xilly_malloc(struct xilly_cleanup *mem, size_t size)
{
	void *ptr;

	ptr = kzalloc(sizeof(struct list_head) + size, GFP_KERNEL);

	if (!ptr)
		return ptr;

	list_add_tail((struct list_head *) ptr, &mem->to_kfree);

	return ptr + sizeof(struct list_head);
}

static unsigned long xilly_pagealloc(struct xilly_cleanup *mem,
				     unsigned long order)
{
	unsigned long addr;
	struct xilly_page *this;

	this = kmalloc(sizeof(struct xilly_page), GFP_KERNEL);
	if (!this)
		return 0;

	addr = __get_free_pages(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, order);

	if (!addr) {
		kfree(this);
		return 0;
	}

	this->addr = addr;
	this->order = order;

	list_add_tail(&this->node, &mem->to_pagefree);

	return addr;
}

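/*
 * Illustrative sketch only: a hypothetical, never-called example of how
 * the cleanup-tracked allocators above work together. Everything
 * allocated against the same struct xilly_cleanup is released in one
 * shot by xillybus_do_cleanup(), as done on probe failure and remove.
 */
static void __maybe_unused xilly_alloc_example(struct xilly_endpoint *ep)
{
	struct xilly_cleanup mem;
	void *buf;
	unsigned long page;

	INIT_LIST_HEAD(&mem.to_kfree);
	INIT_LIST_HEAD(&mem.to_pagefree);
	INIT_LIST_HEAD(&mem.to_unmap);

	buf = xilly_malloc(&mem, 256);   /* Tracked kzalloc() */
	page = xilly_pagealloc(&mem, 0); /* Tracked single zeroed page */

	if (!buf || !page)
		pr_err("xillybus: Example allocation failed.\n");

	xillybus_do_cleanup(&mem, ep); /* Frees buf and page together */
}
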
static void xillybus_autoflush(struct work_struct *work);

static int xilly_setupchannels(struct xilly_endpoint *ep,
			       struct xilly_cleanup *mem,
			       unsigned char *chandesc,
			       int entries)
{
	int i, entry, wr_nbuffer, rd_nbuffer;
	struct xilly_channel *channel;
	int channelnum, bufnum, bufsize, format, is_writebuf;
	int bytebufsize;
	int synchronous, allowpartial, exclusive_open, seekable;
	int supports_nonempty;
	void *wr_salami = NULL;
	void *rd_salami = NULL;
	int left_of_wr_salami = 0;
	int left_of_rd_salami = 0;
	dma_addr_t dma_addr;
	int msg_buf_done = 0;

	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */

	channel = xilly_malloc(mem, ep->num_channels *
			       sizeof(struct xilly_channel));

	if (!channel)
		goto memfail;

	ep->channels = xilly_malloc(mem, (ep->num_channels + 1) *
				    sizeof(struct xilly_channel *));

	if (!ep->channels)
		goto memfail;

	ep->channels[0] = NULL; /* Channel 0 is message buf. */

	/* Initialize all channels with defaults */

	for (i = 1; i <= ep->num_channels; i++) {
		channel->wr_buffers = NULL;
		channel->rd_buffers = NULL;
		channel->num_wr_buffers = 0;
		channel->num_rd_buffers = 0;
		channel->wr_fpga_buf_idx = -1;
		channel->wr_host_buf_idx = 0;
		channel->wr_host_buf_pos = 0;
		channel->wr_empty = 1;
		channel->wr_ready = 0;
		channel->wr_sleepy = 1;
		channel->rd_fpga_buf_idx = 0;
		channel->rd_host_buf_idx = 0;
		channel->rd_host_buf_pos = 0;
		channel->rd_full = 0;
		channel->wr_ref_count = 0;
		channel->rd_ref_count = 0;

		spin_lock_init(&channel->wr_spinlock);
		spin_lock_init(&channel->rd_spinlock);
		mutex_init(&channel->wr_mutex);
		mutex_init(&channel->rd_mutex);
		init_waitqueue_head(&channel->rd_wait);
		init_waitqueue_head(&channel->wr_wait);
		init_waitqueue_head(&channel->wr_ready_wait);

		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);

		channel->endpoint = ep;
		channel->chan_num = i;

		channel->log2_element_size = 0;

		ep->channels[i] = channel++;
	}

	/*
	 * The DMA buffer address update is atomic on the FPGA, so even if
	 * it was in the middle of sending messages to some buffer, changing
	 * the address is safe, since the data will go to either of the
	 * buffers. Not that this situation should occur at all anyhow.
	 */

	wr_nbuffer = 1;
	rd_nbuffer = 1; /* Buffer zero isn't used at all */

	for (entry = 0; entry < entries; entry++, chandesc += 4) {
		is_writebuf = chandesc[0] & 0x01;
		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
		format = (chandesc[1] >> 4) & 0x03;
		allowpartial = (chandesc[1] >> 6) & 0x01;
		synchronous = (chandesc[1] >> 7) & 0x01;
		bufsize = 1 << (chandesc[2] & 0x1f);
		bufnum = 1 << (chandesc[3] & 0x0f);
		exclusive_open = (chandesc[2] >> 7) & 0x01;
		seekable = (chandesc[2] >> 6) & 0x01;
		supports_nonempty = (chandesc[2] >> 5) & 0x01;

		if ((channelnum > ep->num_channels) ||
		    ((channelnum == 0) && !is_writebuf)) {
			pr_err("xillybus: IDT requests channel out "
			       "of range. Aborting.\n");
			return -ENODEV;
		}

		channel = ep->channels[channelnum]; /* NULL for msg channel */

		bytebufsize = bufsize << 2; /* Overwritten just below */

		if (!is_writebuf) {
			channel->num_rd_buffers = bufnum;
			channel->log2_element_size = ((format > 2) ?
						      2 : format);
			bytebufsize = channel->rd_buf_size = bufsize *
				(1 << channel->log2_element_size);
			channel->rd_allow_partial = allowpartial;
			channel->rd_synchronous = synchronous;
			channel->rd_exclusive_open = exclusive_open;
			channel->seekable = seekable;

			channel->rd_buffers = xilly_malloc(
				mem,
				bufnum * sizeof(struct xilly_buffer *));

			if (!channel->rd_buffers)
				goto memfail;

			this_buffer = xilly_malloc(
				mem,
				bufnum * sizeof(struct xilly_buffer));

			if (!this_buffer)
				goto memfail;
		}

		else if (channelnum > 0) {
			channel->num_wr_buffers = bufnum;
			channel->log2_element_size = ((format > 2) ?
						      2 : format);
			bytebufsize = channel->wr_buf_size = bufsize *
				(1 << channel->log2_element_size);

			channel->seekable = seekable;
			channel->wr_supports_nonempty = supports_nonempty;

			channel->wr_allow_partial = allowpartial;
			channel->wr_synchronous = synchronous;
			channel->wr_exclusive_open = exclusive_open;

			channel->wr_buffers = xilly_malloc(
				mem,
				bufnum * sizeof(struct xilly_buffer *));

			if (!channel->wr_buffers)
				goto memfail;

			this_buffer = xilly_malloc(
				mem,
				bufnum * sizeof(struct xilly_buffer));

			if (!this_buffer)
				goto memfail;
		}

		/*
		 * Although daunting, we cut the chunks for read buffers
		 * from a different salami than the write buffers',
		 * possibly improving performance.
		 */

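		/*
		 * Worked example of the salami scheme below (numbers are
		 * illustrative): with PAGE_SIZE = 4096 and four 1024-byte
		 * buffers, the first iteration allocates one zeroed page
		 * (allocsize = 4096, allocorder = 0) and the buffers are
		 * sliced off it at offsets 0, 1024, 2048 and 3072;
		 * left_of_*_salami hits zero exactly when the salami is
		 * used up. The allocorder loop computes the same value as
		 * the kernel's get_order() helper.
		 */
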
		if (is_writebuf)
			for (i = 0; i < bufnum; i++) {
				/*
				 * Buffers are expected in descending
				 * byte-size order, so there is either
				 * enough for this buffer or none at all.
				 */
				if ((left_of_wr_salami < bytebufsize) &&
				    (left_of_wr_salami > 0)) {
					pr_err("xillybus: "
					       "Corrupt buffer allocation "
					       "in IDT. Aborting.\n");
					return -ENODEV;
				}

				if (left_of_wr_salami == 0) {
					int allocorder, allocsize;

					allocsize = PAGE_SIZE;
					allocorder = 0;
					while (bytebufsize > allocsize) {
						allocsize *= 2;
						allocorder++;
					}

					wr_salami = (void *)
						xilly_pagealloc(mem,
								allocorder);
					if (!wr_salami)
						goto memfail;
					left_of_wr_salami = allocsize;
				}

				dma_addr = ep->ephw->map_single(
					mem,
					ep,
					wr_salami,
					bytebufsize,
					DMA_FROM_DEVICE);

				if (!dma_addr)
					goto dmafail;

				iowrite32(
					(u32) (dma_addr & 0xffffffff),
					&ep->registers[
						fpga_dma_bufaddr_lowaddr_reg]);
				iowrite32(
					((u32) ((((u64) dma_addr) >> 32)
						& 0xffffffff)),
					&ep->registers[
						fpga_dma_bufaddr_highaddr_reg]);
				mmiowb();

				if (channelnum > 0) {
					this_buffer->addr = wr_salami;
					this_buffer->dma_addr = dma_addr;
					channel->wr_buffers[i] = this_buffer++;

					iowrite32(
						0x80000000 | wr_nbuffer++,
						&ep->registers[
							fpga_dma_bufno_reg]);
				} else {
					ep->msgbuf_addr = wr_salami;
					ep->msgbuf_dma_addr = dma_addr;
					ep->msg_buf_size = bytebufsize;
					msg_buf_done++;

					iowrite32(
						0x80000000, &ep->registers[
							fpga_dma_bufno_reg]);
				}

				left_of_wr_salami -= bytebufsize;
				wr_salami += bytebufsize;
			}
		else /* Read buffers */
			for (i = 0; i < bufnum; i++) {
				/*
				 * Buffers are expected in descending
				 * byte-size order, so there is either
				 * enough for this buffer or none at all.
				 */
				if ((left_of_rd_salami < bytebufsize) &&
				    (left_of_rd_salami > 0)) {
					pr_err("xillybus: "
					       "Corrupt buffer allocation "
					       "in IDT. Aborting.\n");
					return -ENODEV;
				}

				if (left_of_rd_salami == 0) {
					int allocorder, allocsize;

					allocsize = PAGE_SIZE;
					allocorder = 0;
					while (bytebufsize > allocsize) {
						allocsize *= 2;
						allocorder++;
					}

					rd_salami = (void *)
						xilly_pagealloc(mem,
								allocorder);

					if (!rd_salami)
						goto memfail;
					left_of_rd_salami = allocsize;
				}

				dma_addr = ep->ephw->map_single(
					mem,
					ep,
					rd_salami,
					bytebufsize,
					DMA_TO_DEVICE);

				if (!dma_addr)
					goto dmafail;

				iowrite32(
					(u32) (dma_addr & 0xffffffff),
					&ep->registers[
						fpga_dma_bufaddr_lowaddr_reg]);
				iowrite32(
					((u32) ((((u64) dma_addr) >> 32)
						& 0xffffffff)),
					&ep->registers[
						fpga_dma_bufaddr_highaddr_reg]);
				mmiowb();

				this_buffer->addr = rd_salami;
				this_buffer->dma_addr = dma_addr;
				channel->rd_buffers[i] = this_buffer++;

				iowrite32(rd_nbuffer++,
					  &ep->registers[fpga_dma_bufno_reg]);

				left_of_rd_salami -= bytebufsize;
				rd_salami += bytebufsize;
			}
	}

	if (!msg_buf_done) {
		pr_err("xillybus: Corrupt IDT: No message buffer. "
		       "Aborting.\n");
		return -ENODEV;
	}

	return 0;

memfail:
	pr_err("xillybus: Failed to allocate buffer memory. Aborting.\n");
	return -ENOMEM;
dmafail:
	pr_err("xillybus: Failed to map DMA memory. Aborting.\n");
	return -ENOMEM;
}

static void xilly_scan_idt(struct xilly_endpoint *endpoint,
			   struct xilly_idt_handle *idt_handle)
{
	int count = 0;
	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
	unsigned char *scan;
	int len;

	scan = idt;
	idt_handle->idt = idt;

	scan++; /* Skip version number */

	while ((scan <= end_of_idt) && *scan) {
		while ((scan <= end_of_idt) && *scan++)
			/* Do nothing, just scan through the string */;
		count++;
	}

	scan++;

	if (scan > end_of_idt) {
		pr_err("xillybus: IDT device name list overflow. "
		       "Aborting.\n");
		idt_handle->chandesc = NULL;
		return;
	} else
		idt_handle->chandesc = scan;

	len = endpoint->idtlen - (3 + ((int) (scan - idt)));

	if (len & 0x03) {
		idt_handle->chandesc = NULL;

		pr_err("xillybus: Corrupt IDT device name list. "
		       "Aborting.\n");
	}

	idt_handle->entries = len >> 2;

	endpoint->num_channels = count;
}

static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{
	int rc = 0;
	struct xilly_channel *channel;
	unsigned char *version;

	channel = endpoint->channels[1]; /* This should be generated ad-hoc */

	channel->wr_sleepy = 1;
	wmb(); /* Setting wr_sleepy must come before the command */

	iowrite32(1 |
		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
		  &endpoint->registers[fpga_buf_ctrl_reg]);
	mmiowb(); /* Just to appear safe */

	wait_event_interruptible_timeout(channel->wr_wait,
					 (!channel->wr_sleepy),
					 XILLY_TIMEOUT);

	if (channel->wr_sleepy) {
		pr_err("xillybus: Failed to obtain IDT. Aborting.\n");

		if (endpoint->fatal_error)
			return -EIO;

		rc = -ENODEV;
		return rc;
	}

	endpoint->ephw->hw_sync_sgl_for_cpu(
		channel->endpoint,
		channel->wr_buffers[0]->dma_addr,
		channel->wr_buf_size,
		DMA_FROM_DEVICE);

	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
		pr_err("xillybus: IDT length mismatch (%d != %d). "
		       "Aborting.\n",
		       channel->wr_buffers[0]->end_offset, endpoint->idtlen);
		rc = -ENODEV;
		return rc;
	}

	if (crc32_le(~0, channel->wr_buffers[0]->addr,
		     endpoint->idtlen+1) != 0) {
		pr_err("xillybus: IDT failed CRC check. Aborting.\n");
		rc = -ENODEV;
		return rc;
	}

	version = channel->wr_buffers[0]->addr;

	/* Check version number. Accept anything up to 0x82 for now. */
	if (*version > 0x82) {
		pr_err("xillybus: No support for IDT version 0x%02x. "
		       "Maybe the xillybus driver needs an upgrade. "
		       "Aborting.\n",
		       (int) *version);
		rc = -ENODEV;
		return rc;
	}

	return 0; /* Success */
}

static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	int no_time_left = 0;
	long deadline, left_to_sleep;
	struct xilly_channel *channel = filp->private_data;

	int empty, reached_eof, exhausted, ready;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int waiting_bufidx;

	if (channel->endpoint->fatal_error)
		return -EIO;

	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

	rc = mutex_lock_interruptible(&channel->wr_mutex);

	if (rc)
		return rc;

	rc = 0; /* Just to be clear about it. Compiler optimizes this out */

	while (1) { /* Note that we may drop mutex within this loop */
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->wr_spinlock, flags);

		empty = channel->wr_empty;
		ready = !empty || channel->wr_ready;

		if (!empty) {
			bufidx = channel->wr_host_buf_idx;
			bufpos = channel->wr_host_buf_pos;
			howmany = ((channel->wr_buffers[bufidx]->end_offset
				    + 1) << channel->log2_element_size)
				- bufpos;

			/* Update wr_host_* to its post-operation state */
			if (howmany > bytes_to_do) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->wr_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				channel->wr_host_buf_pos = 0;

				if (bufidx == channel->wr_fpga_buf_idx) {
					channel->wr_empty = 1;
					channel->wr_sleepy = 1;
					channel->wr_ready = 0;
				}

				if (bufidx >= (channel->num_wr_buffers - 1))
					channel->wr_host_buf_idx = 0;
				else
					channel->wr_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * empty = empty before change
		 * exhausted = empty after possible change
		 */

		reached_eof = channel->wr_empty &&
			(channel->wr_host_buf_idx == channel->wr_eof);
		channel->wr_hangup = reached_eof;
		exhausted = channel->wr_empty;
		waiting_bufidx = channel->wr_host_buf_idx;

		spin_unlock_irqrestore(&channel->wr_spinlock, flags);

		if (!empty) { /* Go on, now without the spinlock */

			if (bufpos == 0) /* Position zero means it's virgin */
				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
					channel->endpoint,
					channel->wr_buffers[bufidx]->dma_addr,
					channel->wr_buf_size,
					DMA_FROM_DEVICE);

			if (copy_to_user(
				    userbuf,
				    channel->wr_buffers[bufidx]->addr
				    + bufpos, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				channel->endpoint->ephw->
					hw_sync_sgl_for_device(
						channel->endpoint,
						channel->wr_buffers[bufidx]->
						dma_addr,
						channel->wr_buf_size,
						DMA_FROM_DEVICE);

				/*
				 * Tell FPGA the buffer is done with. It's an
				 * atomic operation to the FPGA, so what
				 * happens with other channels doesn't matter,
				 * and the certain channel is protected with
				 * the channel-specific mutex.
				 */

				iowrite32(1 | (channel->chan_num << 1)
					  | (bufidx << 12),
					  &channel->endpoint->registers[
						  fpga_buf_ctrl_reg]);
				mmiowb(); /* Just to appear safe */
			}

			if (rc) {
				mutex_unlock(&channel->wr_mutex);
				return rc;
			}
		}

		/* This includes a zero-count return = EOF */
		if ((bytes_done >= count) || reached_eof)
			break;

		if (!exhausted)
			continue; /* More in RAM buffer(s)? Just go on. */

		if ((bytes_done > 0) &&
		    (no_time_left ||
		     (channel->wr_synchronous && channel->wr_allow_partial)))
			break;

		/*
		 * Nonblocking read: The "ready" flag tells us that the FPGA
		 * has data to send. In non-blocking mode, if it isn't on,
		 * just return. But if there is, we jump directly to the point
		 * where we ask for the FPGA to send all it has, and wait
		 * until that data arrives. So in a sense, we *do* block in
		 * nonblocking mode, but only for a very short time.
		 */

		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
			if (bytes_done > 0)
				break;

			if (ready)
				goto desperate;

			bytes_done = -EAGAIN;
			break;
		}

		if (!no_time_left || (bytes_done > 0)) {
			/*
			 * Note that in case of an element-misaligned read
			 * request, offsetlimit will include the last element,
			 * which will be partially read from.
			 */
			int offsetlimit = ((count - bytes_done) - 1) >>
				channel->log2_element_size;
			int buf_elements = channel->wr_buf_size >>
				channel->log2_element_size;

			/*
			 * In synchronous mode, always send an offset limit.
			 * Just don't send a value too big.
			 */

			if (channel->wr_synchronous) {
				/* Don't request more than one buffer */
				if (channel->wr_allow_partial &&
				    (offsetlimit >= buf_elements))
					offsetlimit = buf_elements - 1;

				/* Don't request more than all buffers */
				if (!channel->wr_allow_partial &&
				    (offsetlimit >=
				     (buf_elements * channel->num_wr_buffers)))
					offsetlimit = buf_elements *
						channel->num_wr_buffers - 1;
			}

			/*
			 * In asynchronous mode, force early flush of a buffer
			 * only if that will allow returning a full count. The
			 * "offsetlimit < ( ... )" rather than "<=" excludes
			 * requesting a full buffer, which would obviously
			 * cause a buffer transmission anyhow.
			 */

			if (channel->wr_synchronous ||
			    (offsetlimit < (buf_elements - 1))) {

				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(offsetlimit,
					  &channel->endpoint->registers[
						  fpga_buf_offset_reg]);
				mmiowb();

				iowrite32(1 | (channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = offset limit */
					  (waiting_bufidx << 12),
					  &channel->endpoint->registers[
						  fpga_buf_ctrl_reg]);

				mmiowb(); /* Just to appear safe */

				mutex_unlock(&channel->endpoint->
					     register_mutex);
			}

		}

		/*
		 * If partial completion is disallowed, there is no point in
		 * timeout sleeping. Neither if no_time_left is set and
		 * there's no data.
		 */

		if (!channel->wr_allow_partial ||
		    (no_time_left && (bytes_done == 0))) {

			/*
			 * This do-loop will run more than once if another
			 * thread reasserted wr_sleepy before we got the mutex
			 * back, so we try again.
			 */

			do {
				mutex_unlock(&channel->wr_mutex);

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					goto interrupted;

				if (mutex_lock_interruptible(
					    &channel->wr_mutex))
					goto interrupted;
			} while (channel->wr_sleepy);

			continue;

interrupted: /* Mutex is not held if got here */
			if (channel->endpoint->fatal_error)
				return -EIO;
			if (bytes_done)
				return bytes_done;
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN; /* Don't admit snoozing */
			return -EINTR;
		}

		left_to_sleep = deadline - ((long) jiffies);

		/*
		 * If our time is out, skip the waiting. We may miss wr_sleepy
		 * being deasserted but hey, almost missing the train is like
		 * missing it.
		 */

		if (left_to_sleep > 0) {
			left_to_sleep =
				wait_event_interruptible_timeout(
					channel->wr_wait,
					(!channel->wr_sleepy),
					left_to_sleep);

			if (!channel->wr_sleepy)
				continue;

			if (left_to_sleep < 0) { /* Interrupt */
				mutex_unlock(&channel->wr_mutex);
				if (channel->endpoint->fatal_error)
					return -EIO;
				if (bytes_done)
					return bytes_done;
				return -EINTR;
			}
		}

desperate:
		no_time_left = 1; /* We're out of sleeping time. Desperate! */

		if (bytes_done == 0) {
			/*
			 * Reaching here means that we allow partial return,
			 * that we've run out of time, and that we have
			 * nothing to return.
			 * So tell the FPGA to send anything it has or gets.
			 */

			iowrite32(1 | (channel->chan_num << 1) |
				  (3 << 24) |  /* Opcode 3, flush it all! */
				  (waiting_bufidx << 12),
				  &channel->endpoint->registers[
					  fpga_buf_ctrl_reg]);
			mmiowb(); /* Just to appear safe */
		}

		/*
		 * Formally speaking, we should block for data at this point.
		 * But to keep the code cleaner, we'll just finish the loop,
		 * make the unlikely check for data, and then block at the
		 * usual place.
		 */
	}

	mutex_unlock(&channel->wr_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	return bytes_done;
}

/*
 * The timeout argument takes values as follows:
 *  >0 : Flush with timeout
 * ==0 : Flush, and wait indefinitely for the flush to complete
 *  <0 : Autoflush: Flush only if there's a single buffer occupied
 */

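/*
 * For orientation, the three cases map to the three callers in this
 * file: xillybus_flush() passes HZ (a one-second timeout),
 * xillybus_write() passes 0 for synchronous channels, and
 * xillybus_autoflush() passes -1.
 */
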
static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{
	int rc = 0;
	unsigned long flags;

	int end_offset_plus1;
	int bufidx, bufidx_minus1;
	int i;
	int empty;
	int new_rd_host_buf_pos;

	if (channel->endpoint->fatal_error)
		return -EIO;
	rc = mutex_lock_interruptible(&channel->rd_mutex);

	if (rc)
		return rc;

	/*
	 * Don't flush a closed channel. This can happen when the work queued
	 * autoflush thread fires off after the file has closed. This is not
	 * an error, just something to dismiss.
	 */

	if (!channel->rd_ref_count)
		goto done;

	bufidx = channel->rd_host_buf_idx;

	bufidx_minus1 = (bufidx == 0) ? channel->num_rd_buffers - 1 : bufidx-1;

	end_offset_plus1 = channel->rd_host_buf_pos >>
		channel->log2_element_size;

	new_rd_host_buf_pos = channel->rd_host_buf_pos -
		(end_offset_plus1 << channel->log2_element_size);

	/* Submit the current buffer if it's nonempty */
	if (end_offset_plus1) {
		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
			(end_offset_plus1 << channel->log2_element_size);

		/* Copy unflushed data, so we can put it in next buffer */
		for (i = 0; i < new_rd_host_buf_pos; i++)
			channel->rd_leftovers[i] = *tail++;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		/* Autoflush only if a single buffer is occupied */

		if ((timeout < 0) &&
		    (channel->rd_full ||
		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
			/*
			 * A new work item may be queued by the ISR exactly
			 * now, since the execution of a work item allows the
			 * queuing of a new one while it's running.
			 */
			goto done;
		}

		/* The 4th element is never needed for data, so it's a flag */
		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);

		/* Set up rd_full to reflect a certain moment's state */

		if (bufidx == channel->rd_fpga_buf_idx)
			channel->rd_full = 1;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (bufidx >= (channel->num_rd_buffers - 1))
			channel->rd_host_buf_idx = 0;
		else
			channel->rd_host_buf_idx++;

		channel->endpoint->ephw->hw_sync_sgl_for_device(
			channel->endpoint,
			channel->rd_buffers[bufidx]->dma_addr,
			channel->rd_buf_size,
			DMA_TO_DEVICE);

		mutex_lock(&channel->endpoint->register_mutex);

		iowrite32(end_offset_plus1 - 1,
			  &channel->endpoint->registers[fpga_buf_offset_reg]);
		mmiowb();

		iowrite32((channel->chan_num << 1) | /* Channel ID */
			  (2 << 24) |  /* Opcode 2, submit buffer */
			  (bufidx << 12),
			  &channel->endpoint->registers[fpga_buf_ctrl_reg]);
		mmiowb(); /* Just to appear safe */

		mutex_unlock(&channel->endpoint->register_mutex);
	} else if (bufidx == 0)
		bufidx = channel->num_rd_buffers - 1;
	else
		bufidx--;

	channel->rd_host_buf_pos = new_rd_host_buf_pos;

	if (timeout < 0)
		goto done; /* Autoflush */

	/*
	 * bufidx is now the last buffer written to (or equal to
	 * rd_fpga_buf_idx if buffer was never written to), and
	 * channel->rd_host_buf_idx the one after it.
	 *
	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
	 */

	rc = 0;

	while (1) { /* Loop waiting for draining of buffers */
		spin_lock_irqsave(&channel->rd_spinlock, flags);

		if (bufidx != channel->rd_fpga_buf_idx)
			channel->rd_full = 1; /*
					       * Not really full,
					       * but needs waiting.
					       */

		empty = !channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (empty)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, the user should not be surprised if open() for
		 * write sleeps.
		 */
		if (timeout == 0)
			wait_event_interruptible(channel->rd_wait,
						 (!channel->rd_full));

		else if (wait_event_interruptible_timeout(
				 channel->rd_wait,
				 (!channel->rd_full),
				 timeout) == 0) {
			pr_warn("xillybus: "
				"Timed out while flushing. "
				"Output data may be lost.\n");

			rc = -ETIMEDOUT;
			break;
		}

		if (channel->rd_full) {
			rc = -EINTR;
			break;
		}
	}

done:
	mutex_unlock(&channel->rd_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	return rc;
}

static int xillybus_flush(struct file *filp, fl_owner_t id)
{
	if (!(filp->f_mode & FMODE_WRITE))
		return 0;

	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
}

static void xillybus_autoflush(struct work_struct *work)
{
	struct delayed_work *workitem = container_of(
		work, struct delayed_work, work);
	struct xilly_channel *channel = container_of(
		workitem, struct xilly_channel, rd_workitem);
	int rc;

	rc = xillybus_myflush(channel, -1);

	if (rc == -EINTR)
		pr_warn("xillybus: Autoflush failed because "
			"work queue thread got a signal.\n");
	else if (rc)
		pr_err("xillybus: Autoflush failed under "
		       "weird circumstances.\n");
}

static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
			      size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	struct xilly_channel *channel = filp->private_data;

	int full, exhausted;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int end_offset_plus1 = 0;

	if (channel->endpoint->fatal_error)
		return -EIO;

	rc = mutex_lock_interruptible(&channel->rd_mutex);

	if (rc)
		return rc;

	rc = 0; /* Just to be clear about it. Compiler optimizes this out */

	while (1) {
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		full = channel->rd_full;

		if (!full) {
			bufidx = channel->rd_host_buf_idx;
			bufpos = channel->rd_host_buf_pos;
			howmany = channel->rd_buf_size - bufpos;

			/*
			 * Update rd_host_* to its state after this operation.
			 * count=0 means committing the buffer immediately,
			 * which is like flushing, but doesn't necessarily
			 * block.
			 */

			if ((howmany > bytes_to_do) &&
			    (count ||
			     ((bufpos >> channel->log2_element_size) == 0))) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->rd_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				if (count) {
					end_offset_plus1 =
						channel->rd_buf_size >>
						channel->log2_element_size;
					channel->rd_host_buf_pos = 0;
				} else {
					unsigned char *tail;
					int i;

					end_offset_plus1 = bufpos >>
						channel->log2_element_size;

					channel->rd_host_buf_pos -=
						end_offset_plus1 <<
						channel->log2_element_size;

					tail = channel->
						rd_buffers[bufidx]->addr +
						(end_offset_plus1 <<
						 channel->log2_element_size);

					for (i = 0;
					     i < channel->rd_host_buf_pos;
					     i++)
						channel->rd_leftovers[i] =
							*tail++;
				}

				if (bufidx == channel->rd_fpga_buf_idx)
					channel->rd_full = 1;

				if (bufidx >= (channel->num_rd_buffers - 1))
					channel->rd_host_buf_idx = 0;
				else
					channel->rd_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * full = full before change
		 * exhausted = full after possible change
		 */

		exhausted = channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (!full) { /* Go on, now without the spinlock */
			unsigned char *head =
				channel->rd_buffers[bufidx]->addr;
			int i;

			if ((bufpos == 0) || /* Zero means it's virgin */
			    (channel->rd_leftovers[3] != 0)) {
				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
					channel->endpoint,
					channel->rd_buffers[bufidx]->dma_addr,
					channel->rd_buf_size,
					DMA_TO_DEVICE);

				/* Virgin, but leftovers are due */
				for (i = 0; i < bufpos; i++)
					*head++ = channel->rd_leftovers[i];

				channel->rd_leftovers[3] = 0; /* Clear flag */
			}

			if (copy_from_user(
				    channel->rd_buffers[bufidx]->addr + bufpos,
				    userbuf, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				channel->endpoint->ephw->
					hw_sync_sgl_for_device(
						channel->endpoint,
						channel->rd_buffers[bufidx]->
						dma_addr,
						channel->rd_buf_size,
						DMA_TO_DEVICE);

				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(end_offset_plus1 - 1,
					  &channel->endpoint->registers[
						  fpga_buf_offset_reg]);
				mmiowb();
				iowrite32((channel->chan_num << 1) |
					  (2 << 24) |  /* 2 = submit buffer */
					  (bufidx << 12),
					  &channel->endpoint->registers[
						  fpga_buf_ctrl_reg]);
				mmiowb(); /* Just to appear safe */

				mutex_unlock(&channel->endpoint->
					     register_mutex);

				channel->rd_leftovers[3] =
					(channel->rd_host_buf_pos != 0);
			}

			if (rc) {
				mutex_unlock(&channel->rd_mutex);

				if (channel->endpoint->fatal_error)
					return -EIO;

				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);

				return rc;
			}
		}

		if (bytes_done >= count)
			break;

		if (!exhausted)
			continue; /* If there's more space, just go on */

		if ((bytes_done > 0) && channel->rd_allow_partial)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, user should not be surprised if open() for write
		 * sleeps.
		 */

		if (filp->f_flags & O_NONBLOCK) {
			bytes_done = -EAGAIN;
			break;
		}

		wait_event_interruptible(channel->rd_wait,
					 (!channel->rd_full));

		if (channel->rd_full) {
			mutex_unlock(&channel->rd_mutex);

			if (channel->endpoint->fatal_error)
				return -EIO;

			if (bytes_done)
				return bytes_done;
			return -EINTR;
		}
	}

	mutex_unlock(&channel->rd_mutex);

	if (!channel->rd_synchronous)
		queue_delayed_work(xillybus_wq,
				   &channel->rd_workitem,
				   XILLY_RX_TIMEOUT);

	if ((channel->rd_synchronous) && (bytes_done > 0)) {
		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */

		if (rc && (rc != -EINTR))
			return rc;
	}

	if (channel->endpoint->fatal_error)
		return -EIO;

	return bytes_done;
}

static int xillybus_open(struct inode *inode, struct file *filp)
{
	int rc = 0;
	unsigned long flags;
	int minor = iminor(inode);
	int major = imajor(inode);
	struct xilly_endpoint *ep_iter, *endpoint = NULL;
	struct xilly_channel *channel;

	mutex_lock(&ep_list_lock);

	list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
		if ((ep_iter->major == major) &&
		    (minor >= ep_iter->lowest_minor) &&
		    (minor < (ep_iter->lowest_minor +
			      ep_iter->num_channels))) {
			endpoint = ep_iter;
			break;
		}
	}
	mutex_unlock(&ep_list_lock);

	if (!endpoint) {
		pr_err("xillybus: open() failed to find a device "
		       "for major=%d and minor=%d\n", major, minor);
		return -ENODEV;
	}

	if (endpoint->fatal_error)
		return -EIO;

	channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
	filp->private_data = channel;

	/*
	 * It gets complicated because:
	 * 1. We don't want to take a mutex we don't have to
	 * 2. We don't want to open one direction if the other will fail.
	 */

	if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->wr_synchronous || !channel->wr_allow_partial ||
	     !channel->wr_supports_nonempty)) {
		pr_err("xillybus: open() failed: "
		       "O_NONBLOCK not allowed for read on this device\n");
		return -ENODEV;
	}

	if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->rd_synchronous || !channel->rd_allow_partial)) {
		pr_err("xillybus: open() failed: "
		       "O_NONBLOCK not allowed for write on this device\n");
		return -ENODEV;
	}

	/*
	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
	 * This shouldn't occur normally, since multiple open of the same
	 * file descriptor is almost always prohibited anyhow
	 * (*_exclusive_open is normally set in real-life systems).
	 */

	if (filp->f_mode & FMODE_READ) {
		rc = mutex_lock_interruptible(&channel->wr_mutex);
		if (rc)
			return rc;
	}

	if (filp->f_mode & FMODE_WRITE) {
		rc = mutex_lock_interruptible(&channel->rd_mutex);
		if (rc)
			goto unlock_wr;
	}

	if ((filp->f_mode & FMODE_READ) &&
	    (channel->wr_ref_count != 0) &&
	    (channel->wr_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if ((filp->f_mode & FMODE_WRITE) &&
	    (channel->rd_ref_count != 0) &&
	    (channel->rd_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if (filp->f_mode & FMODE_READ) {
		if (channel->wr_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->wr_spinlock, flags);
			channel->wr_host_buf_idx = 0;
			channel->wr_host_buf_pos = 0;
			channel->wr_fpga_buf_idx = -1;
			channel->wr_empty = 1;
			channel->wr_ready = 0;
			channel->wr_sleepy = 1;
			channel->wr_eof = -1;
			channel->wr_hangup = 0;

			spin_unlock_irqrestore(&channel->wr_spinlock, flags);

			iowrite32(1 | (channel->chan_num << 1) |
				  (4 << 24) |  /* Opcode 4, open channel */
				  ((channel->wr_synchronous & 1) << 23),
				  &channel->endpoint->registers[
					  fpga_buf_ctrl_reg]);
			mmiowb(); /* Just to appear safe */
		}

		channel->wr_ref_count++;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (channel->rd_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->rd_spinlock, flags);
			channel->rd_host_buf_idx = 0;
			channel->rd_host_buf_pos = 0;
			channel->rd_leftovers[3] = 0; /* No leftovers. */
			channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
			channel->rd_full = 0;

			spin_unlock_irqrestore(&channel->rd_spinlock, flags);

			iowrite32((channel->chan_num << 1) |
				  (4 << 24),  /* Opcode 4, open channel */
				  &channel->endpoint->registers[
					  fpga_buf_ctrl_reg]);
			mmiowb(); /* Just to appear safe */
		}

		channel->rd_ref_count++;
	}

unlock:
	if (filp->f_mode & FMODE_WRITE)
		mutex_unlock(&channel->rd_mutex);
unlock_wr:
	if (filp->f_mode & FMODE_READ)
		mutex_unlock(&channel->wr_mutex);

	if (!rc && (!channel->seekable))
		return nonseekable_open(inode, filp);

	return rc;
}

static int xillybus_release(struct inode *inode, struct file *filp)
{
	int rc;
	unsigned long flags;
	struct xilly_channel *channel = filp->private_data;

	int buf_idx;
	int eof;

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (filp->f_mode & FMODE_WRITE) {
		rc = mutex_lock_interruptible(&channel->rd_mutex);

		if (rc) {
			pr_warn("xillybus: Failed to close file. "
				"Hardware left in messy state.\n");
			return rc;
		}

		channel->rd_ref_count--;

		if (channel->rd_ref_count == 0) {

			/*
			 * We rely on the kernel calling flush()
			 * before we get here.
			 */

			iowrite32((channel->chan_num << 1) | /* Channel ID */
				  (5 << 24),  /* Opcode 5, close channel */
				  &channel->endpoint->registers[
					  fpga_buf_ctrl_reg]);
			mmiowb(); /* Just to appear safe */
		}
		mutex_unlock(&channel->rd_mutex);
	}

	if (filp->f_mode & FMODE_READ) {
		rc = mutex_lock_interruptible(&channel->wr_mutex);
		if (rc) {
			pr_warn("xillybus: Failed to close file. "
				"Hardware left in messy state.\n");
			return rc;
		}

		channel->wr_ref_count--;

		if (channel->wr_ref_count == 0) {

			iowrite32(1 | (channel->chan_num << 1) |
				  (5 << 24),  /* Opcode 5, close channel */
				  &channel->endpoint->registers[
					  fpga_buf_ctrl_reg]);
			mmiowb(); /* Just to appear safe */

			/*
			 * This is crazily cautious: We make sure not only
			 * that we got an EOF (be it because we closed the
			 * channel or because of a user's EOF), but also that
			 * it's one beyond the last buffer arrived, so we
			 * have no leftover buffers pending before wrapping
			 * up (which can only happen in asynchronous
			 * channels, BTW).
			 */

			while (1) {
				spin_lock_irqsave(&channel->wr_spinlock,
						  flags);
				buf_idx = channel->wr_fpga_buf_idx;
				eof = channel->wr_eof;
				channel->wr_sleepy = 1;
				spin_unlock_irqrestore(&channel->wr_spinlock,
						       flags);

				/*
				 * Check if eof points at the buffer after
				 * the last one the FPGA submitted. Note that
				 * no EOF is marked by negative eof.
				 */

				buf_idx++;
				if (buf_idx == channel->num_wr_buffers)
					buf_idx = 0;

				if (buf_idx == eof)
					break;

				/*
				 * Steal an extra 100 ms if awakened by an
				 * interrupt. This is a simple workaround for
				 * an interrupt pending when entering, which
				 * would otherwise result in declaring the
				 * hardware non-responsive.
				 */

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					msleep(100);

				if (channel->wr_sleepy) {
					mutex_unlock(&channel->wr_mutex);
					pr_warn("xillybus: Hardware failed to "
						"respond to close command, "
						"therefore left in "
						"messy state.\n");
					return -EINTR;
				}
			}
		}

		mutex_unlock(&channel->wr_mutex);
	}

	return 0;
}

static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
{
	struct xilly_channel *channel = filp->private_data;
	loff_t pos = filp->f_pos;
	int rc = 0;

	/*
	 * Take both mutexes not allowing interrupts, since it seems like
	 * common applications don't expect an -EINTR here. Besides, multiple
	 * access to a single file descriptor on seekable devices is a mess
	 * anyhow.
	 */

	if (channel->endpoint->fatal_error)
		return -EIO;

	mutex_lock(&channel->wr_mutex);
	mutex_lock(&channel->rd_mutex);

	switch (whence) {
	case 0:
		pos = offset;
		break;
	case 1:
		pos += offset;
		break;
	case 2:
		pos = offset; /* Going to the end => to the beginning */
		break;
	default:
		rc = -EINVAL;
		goto end;
	}

	/* In any case, we must finish on an element boundary */
	if (pos & ((1 << channel->log2_element_size) - 1)) {
		rc = -EINVAL;
		goto end;
	}

	mutex_lock(&channel->endpoint->register_mutex);

	iowrite32(pos >> channel->log2_element_size,
		  &channel->endpoint->registers[fpga_buf_offset_reg]);
	mmiowb();
	iowrite32((channel->chan_num << 1) |
		  (6 << 24),  /* Opcode 6, set address */
		  &channel->endpoint->registers[fpga_buf_ctrl_reg]);
	mmiowb(); /* Just to appear safe */

	mutex_unlock(&channel->endpoint->register_mutex);

end:
	mutex_unlock(&channel->rd_mutex);
	mutex_unlock(&channel->wr_mutex);

	if (rc) /* Return error after releasing mutexes */
		return rc;

	filp->f_pos = pos;

	/*
	 * Since seekable devices are allowed only when the channel is
	 * synchronous, we assume that there is no data pending in either
	 * direction (which holds true as long as no concurrent access on the
	 * file descriptor takes place).
	 * The only thing we may need to throw away is leftovers from partial
	 * write() flush.
	 */

	channel->rd_leftovers[3] = 0;

	return pos;
}

static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
{
	struct xilly_channel *channel = filp->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	poll_wait(filp, &channel->endpoint->ep_wait, wait);

	/*
	 * poll() won't play ball regarding read() channels which
	 * aren't asynchronous and support the nonempty message. Allowing
	 * that would create situations where data has been delivered at
	 * the FPGA, with users expecting select() to wake up, which it
	 * may not do.
	 */

	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
		poll_wait(filp, &channel->wr_wait, wait);
		poll_wait(filp, &channel->wr_ready_wait, wait);

		spin_lock_irqsave(&channel->wr_spinlock, flags);
		if (!channel->wr_empty || channel->wr_ready)
			mask |= POLLIN | POLLRDNORM;

		if (channel->wr_hangup)
			/*
			 * Not POLLHUP, because its behavior is in the
			 * mist, and POLLIN does what we want: Wake up
			 * the read file descriptor so it sees EOF.
			 */
			mask |= POLLIN | POLLRDNORM;
		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
	}

	/*
	 * If partial data write is disallowed on a write() channel,
	 * it's pointless to ever signal OK to write, because it could
	 * block despite some space being available.
	 */

	if (channel->rd_allow_partial) {
		poll_wait(filp, &channel->rd_wait, wait);

		spin_lock_irqsave(&channel->rd_spinlock, flags);
		if (!channel->rd_full)
			mask |= POLLOUT | POLLWRNORM;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
	}

	if (channel->endpoint->fatal_error)
		mask |= POLLERR;

	return mask;
}

static const struct file_operations xillybus_fops = {
	.owner      = THIS_MODULE,
	.read       = xillybus_read,
	.write      = xillybus_write,
	.open       = xillybus_open,
	.flush      = xillybus_flush,
	.release    = xillybus_release,
	.llseek     = xillybus_llseek,
	.poll       = xillybus_poll,
};

static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
				const unsigned char *idt)
{
	int rc;
	dev_t dev;
	int devnum, i, minor, major;
	char devname[48];
	struct device *device;

	rc = alloc_chrdev_region(&dev, 0, /* minor start */
				 endpoint->num_channels,
				 xillyname);

	if (rc) {
		pr_warn("xillybus: Failed to obtain major/minors\n");
		goto error1;
	}

	endpoint->major = major = MAJOR(dev);
	endpoint->lowest_minor = minor = MINOR(dev);

	cdev_init(&endpoint->cdev, &xillybus_fops);
	endpoint->cdev.owner = endpoint->ephw->owner;
	rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
		      endpoint->num_channels);
	if (rc) {
		pr_warn("xillybus: Failed to add cdev. Aborting.\n");
		goto error2;
	}

	idt++;

	for (i = minor, devnum = 0;
	     devnum < endpoint->num_channels;
	     devnum++, i++) {
		snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);

		devname[sizeof(devname)-1] = 0; /* Should never matter */

		while (*idt++)
			/* Skip to next */;

		device = device_create(xillybus_class,
				       NULL,
				       MKDEV(major, i),
				       NULL,
				       "%s", devname);

		if (IS_ERR(device)) {
			pr_warn("xillybus: Failed to create %s "
				"device. Aborting.\n", devname);
			rc = PTR_ERR(device); /* rc was silently 0 here */
			goto error3;
		}
	}

	pr_info("xillybus: Created %d device files.\n",
		endpoint->num_channels);
	return 0; /* succeed */

error3:
	devnum--; i--;
	for (; devnum >= 0; devnum--, i--)
		device_destroy(xillybus_class, MKDEV(major, i));

	cdev_del(&endpoint->cdev);
error2:
	unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
error1:
	return rc;
}

static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
{
	int minor;

	for (minor = endpoint->lowest_minor;
	     minor < (endpoint->lowest_minor + endpoint->num_channels);
	     minor++)
		device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
	cdev_del(&endpoint->cdev);
	unregister_chrdev_region(MKDEV(endpoint->major,
				       endpoint->lowest_minor),
				 endpoint->num_channels);

	pr_info("xillybus: Removed %d device files.\n",
		endpoint->num_channels);
}

struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
					      struct device *dev,
					      struct xilly_endpoint_hardware
					      *ephw)
{
	struct xilly_endpoint *endpoint;

	endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
	if (!endpoint) {
		pr_err("xillybus: Failed to allocate memory. Aborting.\n");
		return NULL;
	}

	endpoint->pdev = pdev;
	endpoint->dev = dev;
	endpoint->ephw = ephw;
	INIT_LIST_HEAD(&endpoint->cleanup.to_kfree);
	INIT_LIST_HEAD(&endpoint->cleanup.to_pagefree);
	INIT_LIST_HEAD(&endpoint->cleanup.to_unmap);
	endpoint->msg_counter = 0x0b;
	endpoint->failed_messages = 0;
	endpoint->fatal_error = 0;

	init_waitqueue_head(&endpoint->ep_wait);
	mutex_init(&endpoint->register_mutex);

	return endpoint;
}
EXPORT_SYMBOL(xillybus_init_endpoint);

static int xilly_quiesce(struct xilly_endpoint *endpoint)
{
	endpoint->idtlen = -1;
	wmb(); /* Make sure idtlen is set before sending command */
	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  &endpoint->registers[fpga_dma_control_reg]);
	mmiowb();

	wait_event_interruptible_timeout(endpoint->ep_wait,
					 (endpoint->idtlen >= 0),
					 XILLY_TIMEOUT);

	if (endpoint->idtlen < 0) {
		pr_err("xillybus: Failed to quiesce the device on "
		       "exit. Quitting while leaving a mess.\n");
		return -ENODEV;
	}
	return 0; /* Success */
}

int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
{
	int rc = 0;

	struct xilly_cleanup tmpmem;
	int idtbuffersize = (1 << PAGE_SHIFT);

	/*
	 * The bogus IDT is used during bootstrap for allocating the initial
	 * message buffer, and then the message buffer and space for the IDT
	 * itself. The initial message buffer is of a single page's size, but
	 * it's soon replaced with a more modest one (and memory is freed).
	 */

	unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
				       3, 192, PAGE_SHIFT, 0 };
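
	/*
	 * Decoded with the chandesc format parsed in xilly_setupchannels(),
	 * the first bogus entry describes a single one-page write buffer
	 * for channel 0 (the message buffer), and the second a single
	 * one-page write buffer for channel 1, which receives the IDT.
	 */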
	struct xilly_idt_handle idt_handle;

	INIT_LIST_HEAD(&tmpmem.to_kfree);
	INIT_LIST_HEAD(&tmpmem.to_pagefree);
	INIT_LIST_HEAD(&tmpmem.to_unmap);

	/*
	 * Writing the value 0x00000001 to the endianness register signals
	 * which endianness this processor is using, so the FPGA can swap
	 * words as necessary.
	 */

	iowrite32(1, &endpoint->registers[fpga_endian_reg]);
	mmiowb(); /* Writes below are affected by the one above. */

	/* Bootstrap phase I: Allocate temporary message buffer */

	endpoint->num_channels = 0;

	rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 1);

	if (rc)
		goto failed_buffers;

	/* Clear the message subsystem (and counter in particular) */
	iowrite32(0x04, &endpoint->registers[fpga_msg_ctrl_reg]);
	mmiowb();

	endpoint->idtlen = -1;

	smp_wmb();

	/*
	 * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
	 * buffer size.
	 */
	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  &endpoint->registers[fpga_dma_control_reg]);
	mmiowb();

	wait_event_interruptible_timeout(endpoint->ep_wait,
					 (endpoint->idtlen >= 0),
					 XILLY_TIMEOUT);

	if (endpoint->idtlen < 0) {
		pr_err("xillybus: No response from FPGA. Aborting.\n");
		rc = -ENODEV;
		goto failed_quiesce;
	}

	/* Enable DMA */
	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
		  &endpoint->registers[fpga_dma_control_reg]);
	mmiowb();

	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
	while (endpoint->idtlen >= idtbuffersize) {
		idtbuffersize *= 2;
		bogus_idt[6]++;
	}

	endpoint->num_channels = 1;

	rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 2);

	if (rc)
		goto failed_idt;

	smp_wmb();

	rc = xilly_obtain_idt(endpoint);

	if (rc)
		goto failed_idt;

	xilly_scan_idt(endpoint, &idt_handle);

	if (!idt_handle.chandesc) {
		rc = -ENODEV;
		goto failed_idt;
	}

	/* Bootstrap phase III: Allocate buffers according to IDT */

	rc = xilly_setupchannels(endpoint,
				 &endpoint->cleanup,
				 idt_handle.chandesc,
				 idt_handle.entries);

	if (rc)
		goto failed_idt;

	smp_wmb(); /* mutex_lock below should suffice, but won't hurt. */

	/*
	 * The endpoint is now completely configured. We put it on the list
	 * available to open() before registering the char device(s).
	 */

	mutex_lock(&ep_list_lock);
	list_add_tail(&endpoint->ep_list, &list_of_endpoints);
	mutex_unlock(&ep_list_lock);

	rc = xillybus_init_chrdev(endpoint, idt_handle.idt);

	if (rc)
		goto failed_chrdevs;

	xillybus_do_cleanup(&tmpmem, endpoint);

	return 0;

failed_chrdevs:
	mutex_lock(&ep_list_lock);
	list_del(&endpoint->ep_list);
	mutex_unlock(&ep_list_lock);

failed_idt:
	/*
	 * Quiesce the device. Now it's really necessary. Note that rc is
	 * deliberately not overwritten here, so the original error is
	 * returned below.
	 */
	if (xilly_quiesce(endpoint))
		return -ENODEV; /* FPGA may still DMA, so no release */

	flush_workqueue(xillybus_wq);
failed_quiesce:
failed_buffers:
	xillybus_do_cleanup(&tmpmem, endpoint);

	return rc;
}
EXPORT_SYMBOL(xillybus_endpoint_discovery);

void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
{
	xillybus_cleanup_chrdev(endpoint);

	mutex_lock(&ep_list_lock);
	list_del(&endpoint->ep_list);
	mutex_unlock(&ep_list_lock);

	xilly_quiesce(endpoint);

	/*
	 * Flushing is done upon endpoint release to prevent access to memory
	 * just about to be released. This makes the quiesce complete.
	 */
	flush_workqueue(xillybus_wq);
}
EXPORT_SYMBOL(xillybus_endpoint_remove);

static int __init xillybus_init(void)
{
	int rc = 0;

	mutex_init(&ep_list_lock);

	xillybus_class = class_create(THIS_MODULE, xillyname);
	if (IS_ERR(xillybus_class)) {
		rc = PTR_ERR(xillybus_class);
		pr_warn("xillybus: Failed to register class xillybus\n");

		return rc;
	}

	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
	if (!xillybus_wq) { /* Was unchecked; the allocation can fail */
		class_destroy(xillybus_class);
		rc = -ENOMEM;
	}

	return rc;
}

static void __exit xillybus_exit(void)
{
	/* flush_workqueue() was called for each endpoint released */
	destroy_workqueue(xillybus_wq);

	class_destroy(xillybus_class);
}

module_init(xillybus_init);
module_exit(xillybus_exit);