]>
Commit | Line | Data |
---|---|---|
a14eddda VK |
1 | /* |
2 | * User-space DMA and UIO based Redrapids Pocket Change CardBus driver | |
3 | * | |
4 | * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org> | |
5 | * | |
6 | * Licensed under GPL version 2 only. | |
7 | */ | |
8 | ||
9 | #include <linux/device.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/pci.h> | |
12 | #include <linux/uio_driver.h> | |
13 | #include <linux/spinlock.h> | |
14 | #include <linux/cdev.h> | |
15 | #include <linux/delay.h> | |
16 | #include <linux/sysfs.h> | |
17 | #include <linux/poll.h> | |
18 | #include <linux/idr.h> | |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/init.h> | |
21 | #include <linux/ioctl.h> | |
22 | #include <linux/io.h> | |
23 | ||
24 | #include "poch.h" | |
25 | ||
26 | #include <asm/cacheflush.h> | |
27 | ||
28 | #ifndef PCI_VENDOR_ID_RRAPIDS | |
29 | #define PCI_VENDOR_ID_RRAPIDS 0x17D2 | |
30 | #endif | |
31 | ||
32 | #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE | |
33 | #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351 | |
34 | #endif | |
35 | ||
36 | #define POCH_NCHANNELS 2 | |
37 | ||
38 | #define MAX_POCH_CARDS 8 | |
39 | #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS) | |
40 | ||
41 | #define DRV_NAME "poch" | |
42 | #define PFX DRV_NAME ": " | |
43 | ||
44 | /* | |
45 | * BAR0 Bridge Register Definitions | |
46 | */ | |
47 | ||
48 | #define BRIDGE_REV_REG 0x0 | |
49 | #define BRIDGE_INT_MASK_REG 0x4 | |
50 | #define BRIDGE_INT_STAT_REG 0x8 | |
51 | ||
52 | #define BRIDGE_INT_ACTIVE (0x1 << 31) | |
53 | #define BRIDGE_INT_FPGA (0x1 << 2) | |
54 | #define BRIDGE_INT_TEMP_FAIL (0x1 << 1) | |
55 | #define BRIDGE_INT_TEMP_WARN (0x1 << 0) | |
56 | ||
57 | #define BRIDGE_FPGA_RESET_REG 0xC | |
58 | ||
59 | #define BRIDGE_CARD_POWER_REG 0x10 | |
60 | #define BRIDGE_CARD_POWER_EN (0x1 << 0) | |
61 | #define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31) | |
62 | ||
63 | #define BRIDGE_JTAG_REG 0x14 | |
64 | #define BRIDGE_DMA_GO_REG 0x18 | |
65 | #define BRIDGE_STAT_0_REG 0x1C | |
66 | #define BRIDGE_STAT_1_REG 0x20 | |
67 | #define BRIDGE_STAT_2_REG 0x24 | |
68 | #define BRIDGE_STAT_3_REG 0x28 | |
69 | #define BRIDGE_TEMP_STAT_REG 0x2C | |
70 | #define BRIDGE_TEMP_THRESH_REG 0x30 | |
71 | #define BRIDGE_EEPROM_REVSEL_REG 0x34 | |
72 | #define BRIDGE_CIS_STRUCT_REG 0x100 | |
73 | #define BRIDGE_BOARDREV_REG 0x124 | |
74 | ||
75 | /* | |
76 | * BAR1 FPGA Register Definitions | |
77 | */ | |
78 | ||
79 | #define FPGA_IFACE_REV_REG 0x0 | |
80 | #define FPGA_RX_BLOCK_SIZE_REG 0x8 | |
81 | #define FPGA_TX_BLOCK_SIZE_REG 0xC | |
82 | #define FPGA_RX_BLOCK_COUNT_REG 0x10 | |
83 | #define FPGA_TX_BLOCK_COUNT_REG 0x14 | |
84 | #define FPGA_RX_CURR_DMA_BLOCK_REG 0x18 | |
85 | #define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C | |
86 | #define FPGA_RX_GROUP_COUNT_REG 0x20 | |
87 | #define FPGA_TX_GROUP_COUNT_REG 0x24 | |
88 | #define FPGA_RX_CURR_GROUP_REG 0x28 | |
89 | #define FPGA_TX_CURR_GROUP_REG 0x2C | |
90 | #define FPGA_RX_CURR_PCI_REG 0x38 | |
91 | #define FPGA_TX_CURR_PCI_REG 0x3C | |
92 | #define FPGA_RX_GROUP0_START_REG 0x40 | |
93 | #define FPGA_TX_GROUP0_START_REG 0xC0 | |
94 | #define FPGA_DMA_DESC_1_REG 0x140 | |
95 | #define FPGA_DMA_DESC_2_REG 0x144 | |
96 | #define FPGA_DMA_DESC_3_REG 0x148 | |
97 | #define FPGA_DMA_DESC_4_REG 0x14C | |
98 | ||
99 | #define FPGA_DMA_INT_STAT_REG 0x150 | |
100 | #define FPGA_DMA_INT_MASK_REG 0x154 | |
101 | #define FPGA_DMA_INT_RX (1 << 0) | |
102 | #define FPGA_DMA_INT_TX (1 << 1) | |
103 | ||
104 | #define FPGA_RX_GROUPS_PER_INT_REG 0x158 | |
105 | #define FPGA_TX_GROUPS_PER_INT_REG 0x15C | |
106 | #define FPGA_DMA_ADR_PAGE_REG 0x160 | |
107 | #define FPGA_FPGA_REV_REG 0x200 | |
108 | ||
109 | #define FPGA_ADC_CLOCK_CTL_REG 0x204 | |
110 | #define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3) | |
111 | #define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN) | |
112 | #define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0X0 | |
113 | ||
114 | #define FPGA_ADC_DAC_EN_REG 0x208 | |
115 | #define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1) | |
116 | #define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0) | |
117 | ||
118 | #define FPGA_INT_STAT_REG 0x20C | |
119 | #define FPGA_INT_MASK_REG 0x210 | |
120 | #define FPGA_INT_PLL_UNLOCKED (0x1 << 9) | |
121 | #define FPGA_INT_DMA_CORE (0x1 << 8) | |
122 | #define FPGA_INT_TX_FF_EMPTY (0x1 << 7) | |
123 | #define FPGA_INT_RX_FF_EMPTY (0x1 << 6) | |
124 | #define FPGA_INT_TX_FF_OVRFLW (0x1 << 3) | |
125 | #define FPGA_INT_RX_FF_OVRFLW (0x1 << 2) | |
126 | #define FPGA_INT_TX_ACQ_DONE (0x1 << 1) | |
127 | #define FPGA_INT_RX_ACQ_DONE (0x1) | |
128 | ||
129 | #define FPGA_RX_ADC_CTL_REG 0x214 | |
130 | #define FPGA_RX_ADC_CTL_CONT_CAP (0x0) | |
131 | #define FPGA_RX_ADC_CTL_SNAP_CAP (0x1) | |
132 | ||
133 | #define FPGA_RX_ARM_REG 0x21C | |
134 | ||
135 | #define FPGA_DOM_REG 0x224 | |
136 | #define FPGA_DOM_DCM_RESET (0x1 << 5) | |
137 | #define FPGA_DOM_SOFT_RESET (0x1 << 4) | |
138 | #define FPGA_DOM_DUAL_M_SG_DMA (0x0) | |
139 | #define FPGA_DOM_TARGET_ACCESS (0x1) | |
140 | ||
141 | #define FPGA_TX_CTL_REG 0x228 | |
142 | #define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9) | |
143 | #define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2) | |
144 | #define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2) | |
145 | #define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2) | |
146 | #define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2) | |
147 | #define FPGA_TX_CTL_LOOPBACK (0x1 << 0) | |
148 | ||
149 | #define FPGA_ENDIAN_MODE_REG 0x22C | |
150 | #define FPGA_RX_FIFO_COUNT_REG 0x28C | |
151 | #define FPGA_TX_ENABLE_REG 0x298 | |
152 | #define FPGA_TX_TRIGGER_REG 0x29C | |
153 | #define FPGA_TX_DATAMEM_COUNT_REG 0x2A8 | |
154 | #define FPGA_CAP_FIFO_REG 0x300 | |
155 | #define FPGA_TX_SNAPSHOT_REG 0x8000 | |
156 | ||
157 | /* | |
158 | * Channel Index Definitions | |
159 | */ | |
160 | ||
/* Channel indices within a card: index 0 is RX, index 1 is TX. */
enum {
	CHNO_RX_CHANNEL,
	CHNO_TX_CHANNEL,
};

struct poch_dev;

/* Data-transfer direction of a channel, from the host's point of view. */
enum channel_dir {
	CHANNEL_DIR_RX,
	CHANNEL_DIR_TX,
};
172 | ||
/*
 * Per-group DMA buffer bookkeeping.  One entry exists for each DMA
 * group of a channel (see channel_info.groups).
 */
struct poch_group_info {
	struct page *pg;		/* first page of the group buffer */
	dma_addr_t dma_addr;		/* address programmed into the FPGA */
	unsigned long user_offset;	/* offset of this group in the mmap */
};
178 | ||
/*
 * Per-channel state.  Each card has POCH_NCHANNELS channels (one RX,
 * one TX); one channel_info exists per channel.
 */
struct channel_info {
	unsigned int chno;	/* CHNO_RX_CHANNEL or CHNO_TX_CHANNEL */

	/* Values staged from sysfs writes; latched into the unsigned
	 * long fields below by channel_latch_attr() at open time.
	 */
	atomic_t sys_block_size;
	atomic_t sys_group_size;
	atomic_t sys_group_count;

	enum channel_dir dir;

	unsigned long block_size;	/* bytes; must be a multiple of 8 */
	unsigned long group_size;	/* bytes; multiple of block_size */
	unsigned long group_count;

	/* Contains the DMA address and VM offset of each group. */
	struct poch_group_info *groups;

	/* Contains the header and circular buffer exported to userspace. */
	spinlock_t group_offsets_lock;
	struct poch_cbuf_header *header;
	struct page *header_pg;
	unsigned long header_size;

	/* Last group indicated as 'complete' to user space. */
	unsigned int transfer;

	/* Wait queue used by poch_poll(); presumably woken by the
	 * interrupt handler when groups complete — not visible here.
	 */
	wait_queue_head_t wq;

	union {
		unsigned int data_available;	/* RX */
		unsigned int space_available;	/* TX */
	};

	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t *iomem_lock;	/* shared with the sibling channel */

	atomic_t free;		/* 1 when the channel is not open */
	atomic_t inited;	/* non-zero once poch_open() completed */

	/* Error counters */
	struct poch_counters counters;
	spinlock_t counters_lock;

	struct device *dev;
};
224 | ||
/* Per-card state: UIO info, PCI device, and the two channels. */
struct poch_dev {
	struct uio_info uio;
	struct pci_dev *pci_dev;
	unsigned int nchannels;
	struct channel_info channels[POCH_NCHANNELS];
	struct cdev cdev;

	/* Counts the no. of channels that have been opened. On first
	 * open, the card is powered on. On last channel close, the
	 * card is powered off.
	 */
	atomic_t usage;

	void __iomem *bridge_iomem;	/* BAR0: bridge registers */
	void __iomem *fpga_iomem;	/* BAR1: FPGA registers */
	spinlock_t iomem_lock;		/* serializes shared registers */

	struct device *dev;
};
244 | ||
/* Driver-wide state; presumably set up at module init (not visible here). */
static dev_t poch_first_dev;		/* base of the char-dev number range */
static struct class *poch_cls;
static DEFINE_IDR(poch_ids);
248 | ||
249 | static ssize_t store_block_size(struct device *dev, | |
250 | struct device_attribute *attr, | |
251 | const char *buf, size_t count) | |
252 | { | |
253 | struct channel_info *channel = dev_get_drvdata(dev); | |
254 | unsigned long block_size; | |
255 | ||
256 | sscanf(buf, "%lu", &block_size); | |
257 | atomic_set(&channel->sys_block_size, block_size); | |
258 | ||
259 | return count; | |
260 | } | |
261 | static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size); | |
262 | ||
263 | static ssize_t store_group_size(struct device *dev, | |
264 | struct device_attribute *attr, | |
265 | const char *buf, size_t count) | |
266 | { | |
267 | struct channel_info *channel = dev_get_drvdata(dev); | |
268 | unsigned long group_size; | |
269 | ||
270 | sscanf(buf, "%lu", &group_size); | |
271 | atomic_set(&channel->sys_group_size, group_size); | |
272 | ||
273 | return count; | |
274 | } | |
275 | static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size); | |
276 | ||
277 | static ssize_t store_group_count(struct device *dev, | |
278 | struct device_attribute *attr, | |
279 | const char *buf, size_t count) | |
280 | { | |
281 | struct channel_info *channel = dev_get_drvdata(dev); | |
282 | unsigned long group_count; | |
283 | ||
284 | sscanf(buf, "%lu", &group_count); | |
285 | atomic_set(&channel->sys_group_count, group_count); | |
286 | ||
287 | return count; | |
288 | } | |
289 | static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count); | |
290 | ||
291 | static ssize_t show_direction(struct device *dev, | |
292 | struct device_attribute *attr, char *buf) | |
293 | { | |
294 | struct channel_info *channel = dev_get_drvdata(dev); | |
295 | int len; | |
296 | ||
297 | len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx")); | |
298 | return len; | |
299 | } | |
300 | static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL); | |
301 | ||
ea0a337f VK |
302 | static unsigned long npages(unsigned long bytes) |
303 | { | |
304 | if (bytes % PAGE_SIZE == 0) | |
305 | return bytes / PAGE_SIZE; | |
306 | else | |
307 | return (bytes / PAGE_SIZE) + 1; | |
308 | } | |
309 | ||
a14eddda VK |
310 | static ssize_t show_mmap_size(struct device *dev, |
311 | struct device_attribute *attr, char *buf) | |
312 | { | |
313 | struct channel_info *channel = dev_get_drvdata(dev); | |
314 | int len; | |
315 | unsigned long mmap_size; | |
316 | unsigned long group_pages; | |
317 | unsigned long header_pages; | |
318 | unsigned long total_group_pages; | |
319 | ||
ea0a337f VK |
320 | group_pages = npages(channel->group_size); |
321 | header_pages = npages(channel->header_size); | |
a14eddda VK |
322 | total_group_pages = group_pages * channel->group_count; |
323 | ||
324 | mmap_size = (header_pages + total_group_pages) * PAGE_SIZE; | |
325 | len = sprintf(buf, "%lu\n", mmap_size); | |
326 | return len; | |
327 | } | |
328 | static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL); | |
329 | ||
/* Attributes created on each per-channel class device. */
static struct device_attribute *poch_class_attrs[] = {
	&dev_attr_block_size,
	&dev_attr_group_size,
	&dev_attr_group_count,
	&dev_attr_dir,
	&dev_attr_mmap_size,
};
337 | ||
338 | static void poch_channel_free_groups(struct channel_info *channel) | |
339 | { | |
340 | unsigned long i; | |
341 | ||
342 | for (i = 0; i < channel->group_count; i++) { | |
343 | struct poch_group_info *group; | |
344 | unsigned int order; | |
345 | ||
346 | group = &channel->groups[i]; | |
347 | order = get_order(channel->group_size); | |
348 | if (group->pg) | |
349 | __free_pages(group->pg, order); | |
350 | } | |
351 | } | |
352 | ||
353 | static int poch_channel_alloc_groups(struct channel_info *channel) | |
354 | { | |
355 | unsigned long i; | |
356 | unsigned long group_pages; | |
357 | unsigned long header_pages; | |
358 | ||
ea0a337f VK |
359 | group_pages = npages(channel->group_size); |
360 | header_pages = npages(channel->header_size); | |
a14eddda VK |
361 | |
362 | for (i = 0; i < channel->group_count; i++) { | |
363 | struct poch_group_info *group; | |
364 | unsigned int order; | |
365 | gfp_t gfp_mask; | |
366 | ||
367 | group = &channel->groups[i]; | |
368 | order = get_order(channel->group_size); | |
369 | ||
370 | /* | |
371 | * __GFP_COMP is required here since we are going to | |
372 | * perform non-linear mapping to userspace. For more | |
373 | * information read the vm_insert_page() function | |
374 | * comments. | |
375 | */ | |
376 | ||
377 | gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO; | |
378 | group->pg = alloc_pages(gfp_mask, order); | |
379 | if (!group->pg) { | |
380 | poch_channel_free_groups(channel); | |
381 | return -ENOMEM; | |
382 | } | |
383 | ||
384 | /* FIXME: This is the physical address not the bus | |
385 | * address! This won't work in architectures that | |
386 | * have an IOMMU. Can we use pci_map_single() for | |
387 | * this? | |
388 | */ | |
389 | group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE; | |
390 | group->user_offset = | |
391 | (header_pages + (i * group_pages)) * PAGE_SIZE; | |
392 | ||
393 | printk(KERN_INFO PFX "%ld: user_offset: 0x%lx dma: 0x%x\n", i, | |
394 | group->user_offset, group->dma_addr); | |
395 | } | |
396 | ||
397 | return 0; | |
398 | } | |
399 | ||
7dadbbcf | 400 | static int channel_latch_attr(struct channel_info *channel) |
a14eddda VK |
401 | { |
402 | channel->group_count = atomic_read(&channel->sys_group_count); | |
403 | channel->group_size = atomic_read(&channel->sys_group_size); | |
404 | channel->block_size = atomic_read(&channel->sys_block_size); | |
7dadbbcf VK |
405 | |
406 | if (channel->group_count == 0) { | |
407 | printk(KERN_ERR PFX "invalid group count %lu", | |
408 | channel->group_count); | |
409 | return -EINVAL; | |
410 | } | |
411 | ||
412 | if (channel->group_size == 0 || | |
413 | channel->group_size < channel->block_size) { | |
414 | printk(KERN_ERR PFX "invalid group size %lu", | |
415 | channel->group_size); | |
416 | return -EINVAL; | |
417 | } | |
418 | ||
419 | if (channel->block_size == 0 || (channel->block_size % 8) != 0) { | |
420 | printk(KERN_ERR PFX "invalid block size %lu", | |
421 | channel->block_size); | |
422 | return -EINVAL; | |
423 | } | |
424 | ||
425 | if (channel->group_size % channel->block_size != 0) { | |
426 | printk(KERN_ERR PFX | |
427 | "group size should be multiple of block size"); | |
428 | return -EINVAL; | |
429 | } | |
430 | ||
431 | return 0; | |
a14eddda VK |
432 | } |
433 | ||
434 | /* | |
435 | * Configure DMA group registers | |
436 | */ | |
437 | static void channel_dma_init(struct channel_info *channel) | |
438 | { | |
439 | void __iomem *fpga = channel->fpga_iomem; | |
440 | u32 group_regs_base; | |
441 | u32 group_reg; | |
442 | unsigned int page; | |
443 | unsigned int group_in_page; | |
444 | unsigned long i; | |
445 | u32 block_size_reg; | |
446 | u32 block_count_reg; | |
447 | u32 group_count_reg; | |
448 | u32 groups_per_int_reg; | |
449 | u32 curr_pci_reg; | |
450 | ||
451 | if (channel->chno == CHNO_RX_CHANNEL) { | |
452 | group_regs_base = FPGA_RX_GROUP0_START_REG; | |
453 | block_size_reg = FPGA_RX_BLOCK_SIZE_REG; | |
454 | block_count_reg = FPGA_RX_BLOCK_COUNT_REG; | |
455 | group_count_reg = FPGA_RX_GROUP_COUNT_REG; | |
456 | groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG; | |
457 | curr_pci_reg = FPGA_RX_CURR_PCI_REG; | |
458 | } else { | |
459 | group_regs_base = FPGA_TX_GROUP0_START_REG; | |
460 | block_size_reg = FPGA_TX_BLOCK_SIZE_REG; | |
461 | block_count_reg = FPGA_TX_BLOCK_COUNT_REG; | |
462 | group_count_reg = FPGA_TX_GROUP_COUNT_REG; | |
463 | groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG; | |
464 | curr_pci_reg = FPGA_TX_CURR_PCI_REG; | |
465 | } | |
466 | ||
467 | printk(KERN_WARNING "block_size, group_size, group_count\n"); | |
95ead520 VK |
468 | /* |
469 | * Block size is represented in no. of 64 bit transfers. | |
470 | */ | |
471 | iowrite32(channel->block_size / 8, fpga + block_size_reg); | |
a14eddda VK |
472 | iowrite32(channel->group_size / channel->block_size, |
473 | fpga + block_count_reg); | |
474 | iowrite32(channel->group_count, fpga + group_count_reg); | |
475 | /* FIXME: Hardcoded groups per int. Get it from sysfs? */ | |
476 | iowrite32(1, fpga + groups_per_int_reg); | |
477 | ||
478 | /* Unlock PCI address? Not defined in the data sheet, but used | |
479 | * in the reference code by Redrapids. | |
480 | */ | |
481 | iowrite32(0x1, fpga + curr_pci_reg); | |
482 | ||
483 | /* The DMA address page register is shared between the RX and | |
484 | * TX channels, so acquire lock. | |
485 | */ | |
486 | spin_lock(channel->iomem_lock); | |
487 | for (i = 0; i < channel->group_count; i++) { | |
488 | page = i / 32; | |
489 | group_in_page = i % 32; | |
490 | ||
491 | group_reg = group_regs_base + (group_in_page * 4); | |
492 | ||
493 | iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG); | |
494 | iowrite32(channel->groups[i].dma_addr, fpga + group_reg); | |
495 | } | |
496 | for (i = 0; i < channel->group_count; i++) { | |
497 | page = i / 32; | |
498 | group_in_page = i % 32; | |
499 | ||
500 | group_reg = group_regs_base + (group_in_page * 4); | |
501 | ||
502 | iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG); | |
503 | printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i, | |
504 | ioread32(fpga + group_reg)); | |
505 | } | |
506 | spin_unlock(channel->iomem_lock); | |
507 | ||
508 | } | |
509 | ||
510 | static int poch_channel_alloc_header(struct channel_info *channel) | |
511 | { | |
512 | struct poch_cbuf_header *header = channel->header; | |
513 | unsigned long group_offset_size; | |
514 | unsigned long tot_group_offsets_size; | |
515 | ||
516 | /* Allocate memory to hold header exported userspace */ | |
517 | group_offset_size = sizeof(header->group_offsets[0]); | |
518 | tot_group_offsets_size = group_offset_size * channel->group_count; | |
519 | channel->header_size = sizeof(*header) + tot_group_offsets_size; | |
520 | channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, | |
521 | get_order(channel->header_size)); | |
522 | if (!channel->header_pg) | |
523 | return -ENOMEM; | |
524 | ||
525 | channel->header = page_address(channel->header_pg); | |
526 | ||
527 | return 0; | |
528 | } | |
529 | ||
530 | static void poch_channel_free_header(struct channel_info *channel) | |
531 | { | |
532 | unsigned int order; | |
533 | ||
534 | order = get_order(channel->header_size); | |
535 | __free_pages(channel->header_pg, order); | |
536 | } | |
537 | ||
538 | static void poch_channel_init_header(struct channel_info *channel) | |
539 | { | |
540 | int i; | |
541 | struct poch_group_info *groups; | |
542 | s32 *group_offsets; | |
543 | ||
544 | channel->header->group_size_bytes = channel->group_size; | |
545 | channel->header->group_count = channel->group_count; | |
546 | ||
547 | spin_lock_init(&channel->group_offsets_lock); | |
548 | ||
549 | group_offsets = channel->header->group_offsets; | |
550 | groups = channel->groups; | |
551 | ||
552 | for (i = 0; i < channel->group_count; i++) { | |
553 | if (channel->dir == CHANNEL_DIR_RX) | |
554 | group_offsets[i] = -1; | |
555 | else | |
556 | group_offsets[i] = groups[i].user_offset; | |
557 | } | |
558 | } | |
559 | ||
/* Zero the per-channel error counters (no locking; init path). */
static void __poch_channel_clear_counters(struct channel_info *channel)
{
	channel->counters.pll_unlock = 0;
	channel->counters.fifo_empty = 0;
	channel->counters.fifo_overflow = 0;
}
566 | ||
/*
 * Per-channel setup performed at open: latch and validate the sysfs
 * attributes, allocate group bookkeeping + group buffers + the user
 * header, wire up the iomem pointers, and initialize the exported
 * header.  Uses goto-chain cleanup: each label undoes the allocations
 * made before the failure point.  Returns 0 or a negative errno.
 */
static int poch_channel_init(struct channel_info *channel,
			     struct poch_dev *poch_dev)
{
	struct pci_dev *pdev = poch_dev->pci_dev;
	struct device *dev = &pdev->dev;
	unsigned long alloc_size;
	int ret;

	printk(KERN_WARNING "channel_latch_attr\n");

	ret = channel_latch_attr(channel);
	if (ret != 0)
		goto out;

	channel->transfer = 0;

	/* Allocate memory to hold group information. */
	alloc_size = channel->group_count * sizeof(struct poch_group_info);
	channel->groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!channel->groups) {
		dev_err(dev, "error allocating memory for group info\n");
		ret = -ENOMEM;
		goto out;
	}

	printk(KERN_WARNING "poch_channel_alloc_groups\n");

	ret = poch_channel_alloc_groups(channel);
	if (ret) {
		dev_err(dev, "error allocating groups of order %d\n",
			get_order(channel->group_size));
		goto out_free_group_info;
	}

	ret = poch_channel_alloc_header(channel);
	if (ret) {
		dev_err(dev, "error allocating user space header\n");
		goto out_free_groups;
	}

	channel->fpga_iomem = poch_dev->fpga_iomem;
	channel->bridge_iomem = poch_dev->bridge_iomem;
	channel->iomem_lock = &poch_dev->iomem_lock;
	spin_lock_init(&channel->counters_lock);

	__poch_channel_clear_counters(channel);

	printk(KERN_WARNING "poch_channel_init_header\n");

	poch_channel_init_header(channel);

	return 0;

out_free_groups:
	poch_channel_free_groups(channel);
out_free_group_info:
	kfree(channel->groups);
out:
	return ret;
}
627 | ||
628 | static int poch_wait_fpga_prog(void __iomem *bridge) | |
629 | { | |
630 | unsigned long total_wait; | |
631 | const unsigned long wait_period = 100; | |
632 | /* FIXME: Get the actual timeout */ | |
633 | const unsigned long prog_timeo = 10000; /* 10 Seconds */ | |
634 | u32 card_power; | |
635 | ||
636 | printk(KERN_WARNING "poch_wait_fpg_prog\n"); | |
637 | ||
638 | printk(KERN_INFO PFX "programming fpga ...\n"); | |
639 | total_wait = 0; | |
640 | while (1) { | |
641 | msleep(wait_period); | |
642 | total_wait += wait_period; | |
643 | ||
644 | card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG); | |
645 | if (card_power & BRIDGE_CARD_POWER_PROG_DONE) { | |
646 | printk(KERN_INFO PFX "programming done\n"); | |
647 | return 0; | |
648 | } | |
649 | if (total_wait > prog_timeo) { | |
650 | printk(KERN_ERR PFX | |
651 | "timed out while programming FPGA\n"); | |
652 | return -EIO; | |
653 | } | |
654 | } | |
655 | } | |
656 | ||
657 | static void poch_card_power_off(struct poch_dev *poch_dev) | |
658 | { | |
659 | void __iomem *bridge = poch_dev->bridge_iomem; | |
660 | u32 card_power; | |
661 | ||
662 | iowrite32(0, bridge + BRIDGE_INT_MASK_REG); | |
663 | iowrite32(0, bridge + BRIDGE_DMA_GO_REG); | |
664 | ||
665 | card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG); | |
666 | iowrite32(card_power & ~BRIDGE_CARD_POWER_EN, | |
667 | bridge + BRIDGE_CARD_POWER_REG); | |
668 | } | |
669 | ||
670 | enum clk_src { | |
671 | CLK_SRC_ON_BOARD, | |
672 | CLK_SRC_EXTERNAL | |
673 | }; | |
674 | ||
675 | static void poch_card_clock_on(void __iomem *fpga) | |
676 | { | |
677 | /* FIXME: Get this data through sysfs? */ | |
678 | enum clk_src clk_src = CLK_SRC_ON_BOARD; | |
679 | ||
680 | if (clk_src == CLK_SRC_ON_BOARD) { | |
681 | iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN, | |
682 | fpga + FPGA_ADC_CLOCK_CTL_REG); | |
683 | } else if (clk_src == CLK_SRC_EXTERNAL) { | |
684 | iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK, | |
685 | fpga + FPGA_ADC_CLOCK_CTL_REG); | |
686 | } | |
687 | } | |
688 | ||
/*
 * Power-on sequence for the card: enable power, wait for the FPGA
 * bitstream load, start the ADC clock, then reset the FPGA state
 * machines and select dual-master scatter/gather DMA mode.  Powers
 * the card back off and returns -EIO if programming times out.
 */
static int poch_card_power_on(struct poch_dev *poch_dev)
{
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;

	iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);

	if (poch_wait_fpga_prog(bridge) != 0) {
		poch_card_power_off(poch_dev);
		return -EIO;
	}

	poch_card_clock_on(fpga);

	/* Sync to new clock, reset state machines, set DMA mode. */
	iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
		  | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);

	/* FIXME: The time required for sync. needs to be tuned. */
	msleep(1000);

	return 0;
}
712 | ||
713 | static void poch_channel_analog_on(struct channel_info *channel) | |
714 | { | |
715 | void __iomem *fpga = channel->fpga_iomem; | |
716 | u32 adc_dac_en; | |
717 | ||
718 | spin_lock(channel->iomem_lock); | |
719 | adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG); | |
720 | switch (channel->chno) { | |
721 | case CHNO_RX_CHANNEL: | |
722 | iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF, | |
723 | fpga + FPGA_ADC_DAC_EN_REG); | |
724 | break; | |
725 | case CHNO_TX_CHANNEL: | |
726 | iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF, | |
727 | fpga + FPGA_ADC_DAC_EN_REG); | |
728 | break; | |
729 | } | |
730 | spin_unlock(channel->iomem_lock); | |
731 | } | |
732 | ||
/*
 * Open a channel device.  Exactly one opener per channel is allowed
 * (guarded by channel->free).  The first open of either channel
 * powers the card on and sets up the DMA controller and interrupt
 * masks; per-channel buffers and registers are set up on every open.
 * Errors unwind in reverse order via the goto labels.
 */
static int poch_open(struct inode *inode, struct file *filp)
{
	struct poch_dev *poch_dev;
	struct channel_info *channel;
	void __iomem *bridge;
	void __iomem *fpga;
	int chno;
	int usage;
	int ret;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
	bridge = poch_dev->bridge_iomem;
	fpga = poch_dev->fpga_iomem;

	/* Minor number modulo channel count selects RX vs TX. */
	chno = iminor(inode) % poch_dev->nchannels;
	channel = &poch_dev->channels[chno];

	/* Atomically claim the channel; restore the count if busy. */
	if (!atomic_dec_and_test(&channel->free)) {
		atomic_inc(&channel->free);
		ret = -EBUSY;
		goto out;
	}

	usage = atomic_inc_return(&poch_dev->usage);

	printk(KERN_WARNING "poch_card_power_on\n");

	/* First opener powers the card on. */
	if (usage == 1) {
		ret = poch_card_power_on(poch_dev);
		if (ret)
			goto out_dec_usage;
	}

	printk(KERN_INFO "CardBus Bridge Revision: %x\n",
	       ioread32(bridge + BRIDGE_REV_REG));
	printk(KERN_INFO "CardBus Interface Revision: %x\n",
	       ioread32(fpga + FPGA_IFACE_REV_REG));

	channel->chno = chno;
	filp->private_data = channel;

	printk(KERN_WARNING "poch_channel_init\n");

	ret = poch_channel_init(channel, poch_dev);
	if (ret)
		goto out_power_off;

	poch_channel_analog_on(channel);

	printk(KERN_WARNING "channel_dma_init\n");

	channel_dma_init(channel);

	printk(KERN_WARNING "poch_channel_analog_on\n");

	/* Card-wide DMA/interrupt setup, only on the first open. */
	if (usage == 1) {
		printk(KERN_WARNING "setting up DMA\n");

		/* Initialize DMA Controller. */
		iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
		iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);

		/* Clear pending status by reading the status registers. */
		ioread32(fpga + FPGA_DMA_INT_STAT_REG);
		ioread32(fpga + FPGA_INT_STAT_REG);
		ioread32(bridge + BRIDGE_INT_STAT_REG);

		/* Initialize Interrupts. FIXME: Enable temperature
		 * handling We are enabling both Tx and Rx channel
		 * interrupts here. Do we need to enable interrupts
		 * only for the current channel? Anyways we won't get
		 * the interrupt unless the DMA is activated.
		 */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
		iowrite32(FPGA_INT_DMA_CORE
			  | FPGA_INT_PLL_UNLOCKED
			  | FPGA_INT_TX_FF_EMPTY
			  | FPGA_INT_RX_FF_EMPTY
			  | FPGA_INT_TX_FF_OVRFLW
			  | FPGA_INT_RX_FF_OVRFLW,
			  fpga + FPGA_INT_MASK_REG);
		iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
			  fpga + FPGA_DMA_INT_MASK_REG);
	}

	if (channel->dir == CHANNEL_DIR_TX) {
		/* Flush TX FIFO and output data from cardbus. */
		iowrite32(FPGA_TX_CTL_FIFO_FLUSH
			  | FPGA_TX_CTL_OUTPUT_CARDBUS,
			  fpga + FPGA_TX_CTL_REG);
	}

	atomic_inc(&channel->inited);

	return 0;

out_power_off:
	if (usage == 1)
		poch_card_power_off(poch_dev);
out_dec_usage:
	atomic_dec(&poch_dev->usage);
	atomic_inc(&channel->free);
out:
	return ret;
}
837 | ||
/*
 * Release a channel: the last closer powers the card off, then the
 * channel's header, group buffers and bookkeeping are freed and the
 * channel is marked free again for the next opener.
 */
static int poch_release(struct inode *inode, struct file *filp)
{
	struct channel_info *channel = filp->private_data;
	struct poch_dev *poch_dev;
	int usage;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);

	usage = atomic_dec_return(&poch_dev->usage);
	if (usage == 0) {
		printk(KERN_WARNING "poch_card_power_off\n");
		poch_card_power_off(poch_dev);
	}

	atomic_dec(&channel->inited);
	poch_channel_free_header(channel);
	poch_channel_free_groups(channel);
	kfree(channel->groups);
	atomic_inc(&channel->free);

	return 0;
}
860 | ||
/*
 * Map the header and the group buffers, to user space.
 *
 * The layout matches show_mmap_size(): header pages first, then each
 * group's pages in order.  Only a zero page offset and an exact-size
 * mapping are accepted.  Pages are inserted one at a time with
 * vm_insert_page(); the `pg++` walk relies on the pages of each
 * higher-order allocation being contiguous struct pages.
 */
static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct channel_info *channel = filp->private_data;

	unsigned long start;
	unsigned long size;

	unsigned long group_pages;
	unsigned long header_pages;
	unsigned long total_group_pages;

	int pg_num;
	struct page *pg;

	int i;
	int ret;

	printk(KERN_WARNING "poch_mmap\n");

	if (vma->vm_pgoff) {
		printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	group_pages = npages(channel->group_size);
	header_pages = npages(channel->header_size);
	total_group_pages = group_pages * channel->group_count;

	size = vma->vm_end - vma->vm_start;
	if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
		printk(KERN_WARNING PFX "required %lu bytes\n", size);
		return -EINVAL;
	}

	start = vma->vm_start;

	/* FIXME: Cleanup required on failure? */
	pg = channel->header_pg;
	for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
		printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
		printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
		ret = vm_insert_page(vma, start, pg);
		if (ret) {
			printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
			return ret;
		}
		start += PAGE_SIZE;
	}

	for (i = 0; i < channel->group_count; i++) {
		pg = channel->groups[i].pg;
		for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
			printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
			       pg_num, i, start);
			ret = vm_insert_page(vma, start, pg);
			if (ret) {
				printk(KERN_DEBUG PFX
				       "vm_insert 2 failed at %d\n", pg_num);
				return ret;
			}
			start += PAGE_SIZE;
		}
	}

	return 0;
}
930 | ||
931 | /* | |
932 | * Check whether there is some group that the user space has not | |
933 | * consumed yet. When the user space consumes a group, it sets it to | |
934 | * -1. Cosuming could be reading data in case of RX and filling a | |
935 | * buffer in case of TX. | |
936 | */ | |
937 | static int poch_channel_available(struct channel_info *channel) | |
938 | { | |
939 | int i; | |
940 | ||
941 | spin_lock_irq(&channel->group_offsets_lock); | |
942 | ||
943 | for (i = 0; i < channel->group_count; i++) { | |
944 | if (channel->dir == CHANNEL_DIR_RX | |
945 | && channel->header->group_offsets[i] == -1) { | |
946 | spin_unlock_irq(&channel->group_offsets_lock); | |
947 | return 1; | |
948 | } | |
949 | ||
950 | if (channel->dir == CHANNEL_DIR_TX | |
951 | && channel->header->group_offsets[i] != -1) { | |
952 | spin_unlock_irq(&channel->group_offsets_lock); | |
953 | return 1; | |
954 | } | |
955 | } | |
956 | ||
957 | spin_unlock_irq(&channel->group_offsets_lock); | |
958 | ||
959 | return 0; | |
960 | } | |
961 | ||
962 | static unsigned int poch_poll(struct file *filp, poll_table *pt) | |
963 | { | |
964 | struct channel_info *channel = filp->private_data; | |
965 | unsigned int ret = 0; | |
966 | ||
967 | poll_wait(filp, &channel->wq, pt); | |
968 | ||
969 | if (poch_channel_available(channel)) { | |
970 | if (channel->dir == CHANNEL_DIR_RX) | |
971 | ret = POLLIN | POLLRDNORM; | |
972 | else | |
973 | ret = POLLOUT | POLLWRNORM; | |
974 | } | |
975 | ||
976 | return ret; | |
977 | } | |
978 | ||
979 | static int poch_ioctl(struct inode *inode, struct file *filp, | |
980 | unsigned int cmd, unsigned long arg) | |
981 | { | |
982 | struct channel_info *channel = filp->private_data; | |
983 | void __iomem *fpga = channel->fpga_iomem; | |
984 | void __iomem *bridge = channel->bridge_iomem; | |
985 | void __user *argp = (void __user *)arg; | |
986 | struct vm_area_struct *vms; | |
987 | struct poch_counters counters; | |
988 | int ret; | |
989 | ||
990 | switch (cmd) { | |
991 | case POCH_IOC_TRANSFER_START: | |
992 | switch (channel->chno) { | |
993 | case CHNO_TX_CHANNEL: | |
994 | printk(KERN_INFO PFX "ioctl: Tx start\n"); | |
995 | iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG); | |
996 | iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG); | |
997 | ||
998 | /* FIXME: Does it make sense to do a DMA GO | |
999 | * twice, once in Tx and once in Rx. | |
1000 | */ | |
1001 | iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG); | |
1002 | break; | |
1003 | case CHNO_RX_CHANNEL: | |
1004 | printk(KERN_INFO PFX "ioctl: Rx start\n"); | |
1005 | iowrite32(0x1, fpga + FPGA_RX_ARM_REG); | |
1006 | iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG); | |
1007 | break; | |
1008 | } | |
1009 | break; | |
1010 | case POCH_IOC_TRANSFER_STOP: | |
1011 | switch (channel->chno) { | |
1012 | case CHNO_TX_CHANNEL: | |
1013 | printk(KERN_INFO PFX "ioctl: Tx stop\n"); | |
1014 | iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG); | |
1015 | iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG); | |
1016 | iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG); | |
1017 | break; | |
1018 | case CHNO_RX_CHANNEL: | |
1019 | printk(KERN_INFO PFX "ioctl: Rx stop\n"); | |
1020 | iowrite32(0x0, fpga + FPGA_RX_ARM_REG); | |
1021 | iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG); | |
1022 | break; | |
1023 | } | |
1024 | break; | |
1025 | case POCH_IOC_GET_COUNTERS: | |
1026 | if (access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters))) | |
1027 | return -EFAULT; | |
1028 | ||
1029 | spin_lock_irq(&channel->counters_lock); | |
1030 | counters = channel->counters; | |
1031 | __poch_channel_clear_counters(channel); | |
1032 | spin_unlock_irq(&channel->counters_lock); | |
1033 | ||
1034 | ret = copy_to_user(argp, &counters, | |
1035 | sizeof(struct poch_counters)); | |
1036 | if (ret) | |
1037 | return ret; | |
1038 | ||
1039 | break; | |
1040 | case POCH_IOC_SYNC_GROUP_FOR_USER: | |
1041 | case POCH_IOC_SYNC_GROUP_FOR_DEVICE: | |
1042 | vms = find_vma(current->mm, arg); | |
1043 | if (!vms) | |
1044 | /* Address not mapped. */ | |
1045 | return -EINVAL; | |
1046 | if (vms->vm_file != filp) | |
1047 | /* Address mapped from different device/file. */ | |
1048 | return -EINVAL; | |
1049 | ||
1050 | flush_cache_range(vms, arg, arg + channel->group_size); | |
1051 | break; | |
1052 | } | |
1053 | return 0; | |
1054 | } | |
1055 | ||
/* File operations for the per-channel character devices (ch%d nodes). */
static struct file_operations poch_fops = {
	.owner = THIS_MODULE,
	.open = poch_open,
	.release = poch_release,
	.ioctl = poch_ioctl,
	.poll = poch_poll,
	.mmap = poch_mmap
};
1064 | ||
/*
 * DMA-completion bookkeeping for one channel, called from the IRQ
 * handler.  Reads the hardware's "current group" counter, works out how
 * many groups completed since the last interrupt, publishes their state
 * to user space via header->group_offsets, and wakes pollers.
 */
static void poch_irq_dma(struct channel_info *channel)
{
	u32 prev_transfer;
	u32 curr_transfer;
	long groups_done;
	unsigned long i, j;
	struct poch_group_info *groups;
	s32 *group_offsets;
	u32 curr_group_reg;

	/* Channel buffers not set up yet; nothing to account. */
	if (!atomic_read(&channel->inited))
		return;

	prev_transfer = channel->transfer;

	if (channel->chno == CHNO_RX_CHANNEL)
		curr_group_reg = FPGA_RX_CURR_GROUP_REG;
	else
		curr_group_reg = FPGA_TX_CURR_GROUP_REG;

	curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);

	groups_done = curr_transfer - prev_transfer;
	/* Check wrap over, and handle it. */
	/*
	 * NOTE(review): this also treats "no progress" (curr == prev) as
	 * a full wrap of group_count groups — presumably the hardware
	 * never interrupts without advancing; confirm against the FPGA
	 * spec.
	 */
	if (groups_done <= 0)
		groups_done += channel->group_count;

	group_offsets = channel->header->group_offsets;
	groups = channel->groups;

	/* Serialize against poch_channel_available() and user space. */
	spin_lock(&channel->group_offsets_lock);

	for (i = 0; i < groups_done; i++) {
		j = (prev_transfer + i) % channel->group_count;
		/* RX: mark group ready for reading (-1); TX: expose the
		 * group's user offset so user space can refill it. */
		if (channel->dir == CHANNEL_DIR_RX)
			group_offsets[j] = -1;
		else
			group_offsets[j] = groups[j].user_offset;
	}

	spin_unlock(&channel->group_offsets_lock);

	channel->transfer = curr_transfer;

	/* Wake up anyone blocked in poll()/select(). */
	wake_up_interruptible(&channel->wq);
}
1111 | ||
/*
 * Shared interrupt handler for the whole card.  Reads the bridge, FPGA
 * and DMA status registers, dispatches DMA completions to the per-
 * channel handler, and counts FIFO/PLL error conditions.
 */
static irqreturn_t poch_irq_handler(int irq, void *p)
{
	struct poch_dev *poch_dev = p;
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;
	struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
	struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
	u32 bridge_stat;
	u32 fpga_stat;
	u32 dma_stat;

	bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
	fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
	dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);

	/*
	 * NOTE(review): the three status registers are read a second
	 * time with the results discarded — presumably they are
	 * read-to-clear and this acknowledges the interrupt; confirm
	 * with the hardware documentation.
	 */
	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
	ioread32(fpga + FPGA_INT_STAT_REG);
	ioread32(bridge + BRIDGE_INT_STAT_REG);

	if (bridge_stat & BRIDGE_INT_FPGA) {
		if (fpga_stat & FPGA_INT_DMA_CORE) {
			if (dma_stat & FPGA_DMA_INT_RX)
				poch_irq_dma(channel_rx);
			if (dma_stat & FPGA_DMA_INT_TX)
				poch_irq_dma(channel_tx);
		}
		if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
			/* PLL unlock affects both directions. */
			channel_tx->counters.pll_unlock++;
			channel_rx->counters.pll_unlock++;
			if (printk_ratelimit())
				printk(KERN_WARNING PFX "PLL unlocked\n");
		}
		if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
			channel_tx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
			channel_tx->counters.fifo_overflow++;
		if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
			channel_rx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
			channel_rx->counters.fifo_overflow++;

		/*
		 * FIXME: These errors should be notified through the
		 * poll interface as POLLERR.
		 */

		/* Re-enable interrupts. */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);

		return IRQ_HANDLED;
	}

	/* Not ours — the IRQ line is shared. */
	return IRQ_NONE;
}
1166 | ||
1167 | static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id) | |
1168 | { | |
1169 | int i, j; | |
1170 | int nattrs; | |
1171 | struct channel_info *channel; | |
1172 | dev_t devno; | |
1173 | ||
1174 | if (poch_dev->dev == NULL) | |
1175 | return; | |
1176 | ||
1177 | for (i = 0; i < poch_dev->nchannels; i++) { | |
1178 | channel = &poch_dev->channels[i]; | |
1179 | devno = poch_first_dev + (id * poch_dev->nchannels) + i; | |
1180 | ||
1181 | if (!channel->dev) | |
1182 | continue; | |
1183 | ||
1184 | nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]); | |
1185 | for (j = 0; j < nattrs; j++) | |
1186 | device_remove_file(channel->dev, poch_class_attrs[j]); | |
1187 | ||
1188 | device_unregister(channel->dev); | |
1189 | } | |
1190 | ||
1191 | device_unregister(poch_dev->dev); | |
1192 | } | |
1193 | ||
/*
 * Create the sysfs/class view of one card: a parent "poch%d" device and
 * one "ch%d" child device per channel, each with the poch_class_attrs
 * attribute files.  On any failure, everything registered so far is
 * torn down via poch_class_dev_unregister() and the error is returned.
 */
static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
					     int id)
{
	struct device *dev = &poch_dev->pci_dev->dev;
	int i, j;
	int nattrs;
	int ret;
	struct channel_info *channel;
	dev_t devno;

	/* Parent device carries no devno of its own (MKDEV(0, 0)). */
	poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
				      MKDEV(0, 0), NULL, "poch%d", id);
	if (IS_ERR(poch_dev->dev)) {
		dev_err(dev, "error creating parent class device");
		ret = PTR_ERR(poch_dev->dev);
		/* NULL so the unregister path knows nothing was created. */
		poch_dev->dev = NULL;
		return ret;
	}

	for (i = 0; i < poch_dev->nchannels; i++) {
		channel = &poch_dev->channels[i];

		/* Minor numbers are laid out card-major, channel-minor. */
		devno = poch_first_dev + (id * poch_dev->nchannels) + i;
		channel->dev = device_create(poch_cls, poch_dev->dev, devno,
					     NULL, "ch%d", i);
		if (IS_ERR(channel->dev)) {
			dev_err(dev, "error creating channel class device");
			ret = PTR_ERR(channel->dev);
			channel->dev = NULL;
			poch_class_dev_unregister(poch_dev, id);
			return ret;
		}

		/* Attribute show/store callbacks look up the channel here. */
		dev_set_drvdata(channel->dev, channel);
		nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
		for (j = 0; j < nattrs; j++) {
			ret = device_create_file(channel->dev,
						 poch_class_attrs[j]);
			if (ret) {
				dev_err(dev, "error creating attribute file");
				poch_class_dev_unregister(poch_dev, id);
				return ret;
			}
		}
	}

	return 0;
}
1242 | ||
1243 | static int __devinit poch_pci_probe(struct pci_dev *pdev, | |
1244 | const struct pci_device_id *pci_id) | |
1245 | { | |
1246 | struct device *dev = &pdev->dev; | |
1247 | struct poch_dev *poch_dev; | |
1248 | struct uio_info *uio; | |
1249 | int ret; | |
1250 | int id; | |
1251 | int i; | |
1252 | ||
1253 | poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL); | |
1254 | if (!poch_dev) { | |
1255 | dev_err(dev, "error allocating priv. data memory\n"); | |
1256 | return -ENOMEM; | |
1257 | } | |
1258 | ||
1259 | poch_dev->pci_dev = pdev; | |
1260 | uio = &poch_dev->uio; | |
1261 | ||
1262 | pci_set_drvdata(pdev, poch_dev); | |
1263 | ||
1264 | spin_lock_init(&poch_dev->iomem_lock); | |
1265 | ||
1266 | poch_dev->nchannels = POCH_NCHANNELS; | |
1267 | poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX; | |
1268 | poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX; | |
1269 | ||
1270 | for (i = 0; i < poch_dev->nchannels; i++) { | |
1271 | init_waitqueue_head(&poch_dev->channels[i].wq); | |
1272 | atomic_set(&poch_dev->channels[i].free, 1); | |
1273 | atomic_set(&poch_dev->channels[i].inited, 0); | |
1274 | } | |
1275 | ||
1276 | ret = pci_enable_device(pdev); | |
1277 | if (ret) { | |
1278 | dev_err(dev, "error enabling device\n"); | |
1279 | goto out_free; | |
1280 | } | |
1281 | ||
1282 | ret = pci_request_regions(pdev, "poch"); | |
1283 | if (ret) { | |
1284 | dev_err(dev, "error requesting resources\n"); | |
1285 | goto out_disable; | |
1286 | } | |
1287 | ||
1288 | uio->mem[0].addr = pci_resource_start(pdev, 1); | |
1289 | if (!uio->mem[0].addr) { | |
1290 | dev_err(dev, "invalid BAR1\n"); | |
1291 | ret = -ENODEV; | |
1292 | goto out_release; | |
1293 | } | |
1294 | ||
1295 | uio->mem[0].size = pci_resource_len(pdev, 1); | |
1296 | uio->mem[0].memtype = UIO_MEM_PHYS; | |
1297 | ||
1298 | uio->name = "poch"; | |
1299 | uio->version = "0.0.1"; | |
1300 | uio->irq = -1; | |
1301 | ret = uio_register_device(dev, uio); | |
1302 | if (ret) { | |
1303 | dev_err(dev, "error register UIO device: %d\n", ret); | |
1304 | goto out_release; | |
1305 | } | |
1306 | ||
1307 | poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0), | |
1308 | pci_resource_len(pdev, 0)); | |
1309 | if (poch_dev->bridge_iomem == NULL) { | |
1310 | dev_err(dev, "error mapping bridge (bar0) registers\n"); | |
1311 | ret = -ENOMEM; | |
1312 | goto out_uio_unreg; | |
1313 | } | |
1314 | ||
1315 | poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1), | |
1316 | pci_resource_len(pdev, 1)); | |
1317 | if (poch_dev->fpga_iomem == NULL) { | |
1318 | dev_err(dev, "error mapping fpga (bar1) registers\n"); | |
1319 | ret = -ENOMEM; | |
1320 | goto out_bar0_unmap; | |
1321 | } | |
1322 | ||
1323 | ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED, | |
1324 | dev->bus_id, poch_dev); | |
1325 | if (ret) { | |
1326 | dev_err(dev, "error requesting IRQ %u\n", pdev->irq); | |
1327 | ret = -ENOMEM; | |
1328 | goto out_bar1_unmap; | |
1329 | } | |
1330 | ||
1331 | if (!idr_pre_get(&poch_ids, GFP_KERNEL)) { | |
1332 | dev_err(dev, "error allocating memory ids\n"); | |
1333 | ret = -ENOMEM; | |
1334 | goto out_free_irq; | |
1335 | } | |
1336 | ||
1337 | idr_get_new(&poch_ids, poch_dev, &id); | |
1338 | if (id >= MAX_POCH_CARDS) { | |
1339 | dev_err(dev, "minors exhausted\n"); | |
1340 | ret = -EBUSY; | |
1341 | goto out_free_irq; | |
1342 | } | |
1343 | ||
1344 | cdev_init(&poch_dev->cdev, &poch_fops); | |
1345 | poch_dev->cdev.owner = THIS_MODULE; | |
1346 | ret = cdev_add(&poch_dev->cdev, | |
1347 | poch_first_dev + (id * poch_dev->nchannels), | |
1348 | poch_dev->nchannels); | |
1349 | if (ret) { | |
1350 | dev_err(dev, "error register character device\n"); | |
1351 | goto out_idr_remove; | |
1352 | } | |
1353 | ||
1354 | ret = poch_class_dev_register(poch_dev, id); | |
1355 | if (ret) | |
1356 | goto out_cdev_del; | |
1357 | ||
1358 | return 0; | |
1359 | ||
1360 | out_cdev_del: | |
1361 | cdev_del(&poch_dev->cdev); | |
1362 | out_idr_remove: | |
1363 | idr_remove(&poch_ids, id); | |
1364 | out_free_irq: | |
1365 | free_irq(pdev->irq, poch_dev); | |
1366 | out_bar1_unmap: | |
1367 | iounmap(poch_dev->fpga_iomem); | |
1368 | out_bar0_unmap: | |
1369 | iounmap(poch_dev->bridge_iomem); | |
1370 | out_uio_unreg: | |
1371 | uio_unregister_device(uio); | |
1372 | out_release: | |
1373 | pci_release_regions(pdev); | |
1374 | out_disable: | |
1375 | pci_disable_device(pdev); | |
1376 | out_free: | |
1377 | kfree(poch_dev); | |
1378 | return ret; | |
1379 | } | |
1380 | ||
1381 | /* | |
1382 | * FIXME: We are yet to handle the hot unplug case. | |
1383 | */ | |
1384 | static void poch_pci_remove(struct pci_dev *pdev) | |
1385 | { | |
1386 | struct poch_dev *poch_dev = pci_get_drvdata(pdev); | |
1387 | struct uio_info *uio = &poch_dev->uio; | |
1388 | unsigned int minor = MINOR(poch_dev->cdev.dev); | |
1389 | unsigned int id = minor / poch_dev->nchannels; | |
1390 | ||
a14eddda VK |
1391 | poch_class_dev_unregister(poch_dev, id); |
1392 | cdev_del(&poch_dev->cdev); | |
1393 | idr_remove(&poch_ids, id); | |
1394 | free_irq(pdev->irq, poch_dev); | |
7dadbbcf VK |
1395 | iounmap(poch_dev->fpga_iomem); |
1396 | iounmap(poch_dev->bridge_iomem); | |
a14eddda VK |
1397 | uio_unregister_device(uio); |
1398 | pci_release_regions(pdev); | |
1399 | pci_disable_device(pdev); | |
1400 | pci_set_drvdata(pdev, NULL); | |
1401 | iounmap(uio->mem[0].internal_addr); | |
1402 | ||
1403 | kfree(poch_dev); | |
1404 | } | |
1405 | ||
1406 | static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = { | |
1407 | { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS, | |
1408 | PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) }, | |
1409 | { 0, } | |
1410 | }; | |
1411 | ||
/* PCI driver glue; registered from poch_init_module(). */
static struct pci_driver poch_pci_driver = {
	.name = DRV_NAME,
	.id_table = poch_pci_ids,
	.probe = poch_pci_probe,
	.remove = poch_pci_remove,
};
1418 | ||
1419 | static int __init poch_init_module(void) | |
1420 | { | |
1421 | int ret = 0; | |
1422 | ||
1423 | ret = alloc_chrdev_region(&poch_first_dev, 0, | |
1424 | MAX_POCH_DEVICES, DRV_NAME); | |
1425 | if (ret) { | |
1426 | printk(KERN_ERR PFX "error allocating device no."); | |
1427 | return ret; | |
1428 | } | |
1429 | ||
1430 | poch_cls = class_create(THIS_MODULE, "pocketchange"); | |
1431 | if (IS_ERR(poch_cls)) { | |
1432 | ret = PTR_ERR(poch_cls); | |
1433 | goto out_unreg_chrdev; | |
1434 | } | |
1435 | ||
1436 | ret = pci_register_driver(&poch_pci_driver); | |
1437 | if (ret) { | |
1438 | printk(KERN_ERR PFX "error register PCI device"); | |
1439 | goto out_class_destroy; | |
1440 | } | |
1441 | ||
1442 | return 0; | |
1443 | ||
1444 | out_class_destroy: | |
1445 | class_destroy(poch_cls); | |
1446 | ||
1447 | out_unreg_chrdev: | |
1448 | unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES); | |
1449 | ||
1450 | return ret; | |
1451 | } | |
1452 | ||
/* Module exit: undo poch_init_module() in reverse order. */
static void __exit poch_exit_module(void)
{
	pci_unregister_driver(&poch_pci_driver);
	class_destroy(poch_cls);
	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
}
1459 | ||
1460 | module_init(poch_init_module); | |
1461 | module_exit(poch_exit_module); | |
1462 | ||
1463 | MODULE_LICENSE("GPL v2"); |