/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"
/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core instance.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */
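
/*
 * For illustration only: a rough sketch of how a peripheral driver might
 * consume one of these uni-directional channels through the generic
 * dmaengine slave API. The device pointer "dev", the channel name "rx" and
 * the pre-mapped scatter table "sgt" are hypothetical placeholders; the
 * dmaengine calls themselves are the standard kernel API, not something
 * specific to this driver.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	chan = dma_request_slave_channel(dev, "rx");
 *	if (!chan)
 *		return -ENODEV;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgt.sgl, sgt.nents,
 *				      DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */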
#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;

	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};
static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}
static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}
static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}
static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0 || len > chan->max_length)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;
	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs)
		chan->next_desc = NULL;
	else
		chan->next_desc = desc;

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}
static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return;

	if (active->cyclic) {
		vchan_cyclic_callback(&active->vdesc);
	} else {
		do {
			sg = &active->sg[active->num_completed];
			if (!(BIT(sg->id) & completed_transfers))
				break;
			active->num_completed++;
			if (active->num_completed == active->num_sgs) {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		} while (active);
	}
}
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if (pending & AXI_DMAC_IRQ_SOT)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}
static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;

	desc = kzalloc(sizeof(struct axi_dmac_desc) +
		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->num_sgs = num_sgs;

	return desc;
}
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	desc = axi_dmac_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = sg_dma_address(sg);
		else
			desc->sg[i].src_addr = sg_dma_address(sg);
		desc->sg[i].x_len = sg_dma_len(sg);
		desc->sg[i].y_len = 1;
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, i;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;

	desc = axi_dmac_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		if (direction == DMA_DEV_TO_MEM)
			desc->sg[i].dest_addr = buf_addr;
		else
			desc->sg[i].src_addr = buf_addr;
		desc->sg[i].x_len = period_len;
		desc->sg[i].y_len = 1;
		buf_addr += period_len;
	}

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    !axi_dmac_check_len(chan, xt->numf))
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}
/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
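
/*
 * A hypothetical devicetree fragment, for illustration only, using the
 * per-channel properties parsed below. The addresses, interrupt specifier,
 * clock phandle and width/length values are made-up example numbers:
 *
 *	dma: dma-controller@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x10000>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *
 *		adi,channels {
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *				adi,source-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *				adi,destination-bus-width = <64>;
 *				adi,length-width = <24>;
 *				adi,cyclic;
 *			};
 *		};
 *	};
 */
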
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
	if (ret)
		return ret;

	if (val >= 32)
		chan->max_length = UINT_MAX;
	else
		chan->max_length = (1ULL << val) - 1;

	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");

	return 0;
}
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq <= 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}
static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}
static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);
static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");