1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * RapidIO mport character device
5 * Copyright 2014-2015 Integrated Device Technology, Inc.
6 * Alexandre Bounine <alexandre.bounine@idt.com>
7 * Copyright 2014-2015 Prodrive Technologies
8 * Andre van Herk <andre.van.herk@prodrive-technologies.com>
9 * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
10 * Copyright (C) 2014 Texas Instruments Incorporated
11 * Aurelien Jacquiot <a-jacquiot@ti.com>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/cdev.h>
16 #include <linux/ioctl.h>
17 #include <linux/uaccess.h>
18 #include <linux/list.h>
20 #include <linux/err.h>
21 #include <linux/net.h>
22 #include <linux/poll.h>
23 #include <linux/spinlock.h>
24 #include <linux/sched.h>
25 #include <linux/kfifo.h>
28 #include <linux/slab.h>
29 #include <linux/vmalloc.h>
30 #include <linux/mman.h>
32 #include <linux/dma-mapping.h>
33 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
34 #include <linux/dmaengine.h>
37 #include <linux/rio.h>
38 #include <linux/rio_ids.h>
39 #include <linux/rio_drv.h>
40 #include <linux/rio_mport_cdev.h>
44 #define DRV_NAME "rio_mport"
45 #define DRV_PREFIX DRV_NAME ": "
46 #define DEV_NAME "rio_mport"
47 #define DRV_VERSION "1.0.0"
49 /* Debug output filtering masks */
52 DBG_INIT
= BIT(0), /* driver init */
53 DBG_EXIT
= BIT(1), /* driver exit */
54 DBG_MPORT
= BIT(2), /* mport add/remove */
55 DBG_RDEV
= BIT(3), /* RapidIO device add/remove */
56 DBG_DMA
= BIT(4), /* DMA transfer messages */
57 DBG_MMAP
= BIT(5), /* mapping messages */
58 DBG_IBW
= BIT(6), /* inbound window */
59 DBG_EVENT
= BIT(7), /* event handling messages */
60 DBG_OBW
= BIT(8), /* outbound window messages */
61 DBG_DBELL
= BIT(9), /* doorbell messages */
66 #define rmcd_debug(level, fmt, arg...) \
68 if (DBG_##level & dbg_level) \
69 pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
72 #define rmcd_debug(level, fmt, arg...) \
73 no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
76 #define rmcd_warn(fmt, arg...) \
77 pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)
79 #define rmcd_error(fmt, arg...) \
80 pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
82 MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
83 MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
84 MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
85 MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
86 MODULE_DESCRIPTION("RapidIO mport character device driver");
87 MODULE_LICENSE("GPL");
88 MODULE_VERSION(DRV_VERSION
);
90 static int dma_timeout
= 3000; /* DMA transfer timeout in msec */
91 module_param(dma_timeout
, int, S_IRUGO
);
92 MODULE_PARM_DESC(dma_timeout
, "DMA Transfer Timeout in msec (default: 3000)");
95 static u32 dbg_level
= DBG_NONE
;
96 module_param(dbg_level
, uint
, S_IWUSR
| S_IWGRP
| S_IRUGO
);
97 MODULE_PARM_DESC(dbg_level
, "Debugging output level (default 0 = none)");
101 * An internal DMA coherent buffer
103 struct mport_dma_buf
{
113 * Internal memory mapping structure
115 enum rio_mport_map_dir
{
121 struct rio_mport_mapping
{
122 struct list_head node
;
123 struct mport_dev
*md
;
124 enum rio_mport_map_dir dir
;
127 dma_addr_t phys_addr
; /* for mmap */
128 void *virt_addr
; /* kernel address, for dma_free_coherent */
130 struct kref ref
; /* refcount of vmas sharing the mapping */
134 struct rio_mport_dma_map
{
141 #define MPORT_MAX_DMA_BUFS 16
142 #define MPORT_EVENT_DEPTH 10
145 * mport_dev driver-specific structure that represents mport device
146 * @active mport device status flag
147 * @node list node to maintain list of registered mports
148 * @cdev character device
149 * @dev associated device object
150 * @mport associated subsystem's master port device object
151 * @buf_mutex lock for buffer handling
152 * @file_mutex - lock for open files list
153 * @file_list - list of open files on given mport
154 * @properties properties of this mport
155 * @portwrites queue of inbound portwrites
156 * @pw_lock lock for port write queue
157 * @mappings queue for memory mappings
158 * @dma_chan DMA channels associated with this device
164 struct list_head node
;
167 struct rio_mport
*mport
;
168 struct mutex buf_mutex
;
169 struct mutex file_mutex
;
170 struct list_head file_list
;
171 struct rio_mport_properties properties
;
172 struct list_head doorbells
;
174 struct list_head portwrites
;
176 struct list_head mappings
;
177 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
178 struct dma_chan
*dma_chan
;
180 struct completion comp
;
185 * mport_cdev_priv - data structure specific to individual file object
186 * associated with an open device
187 * @md master port character device object
188 * @async_queue - asynchronous notification queue
189 * @list - file objects tracking list
190 * @db_filters inbound doorbell filters for this descriptor
191 * @pw_filters portwrite filters for this descriptor
192 * @event_fifo event fifo for this descriptor
193 * @event_rx_wait wait queue for this descriptor
194 * @fifo_lock lock for event_fifo
195 * @event_mask event mask for this descriptor
196 * @dmach DMA engine channel allocated for specific file object
198 struct mport_cdev_priv
{
199 struct mport_dev
*md
;
200 struct fasync_struct
*async_queue
;
201 struct list_head list
;
202 struct list_head db_filters
;
203 struct list_head pw_filters
;
204 struct kfifo event_fifo
;
205 wait_queue_head_t event_rx_wait
;
206 spinlock_t fifo_lock
;
207 u32 event_mask
; /* RIO_DOORBELL, RIO_PORTWRITE */
208 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
209 struct dma_chan
*dmach
;
210 struct list_head async_list
;
212 struct mutex dma_lock
;
214 struct completion comp
;
219 * rio_mport_pw_filter - structure to describe a portwrite filter
220 * md_node node in mport device's list
221 * priv_node node in private file object's list
222 * priv reference to private data
223 * filter actual portwrite filter
225 struct rio_mport_pw_filter
{
226 struct list_head md_node
;
227 struct list_head priv_node
;
228 struct mport_cdev_priv
*priv
;
229 struct rio_pw_filter filter
;
233 * rio_mport_db_filter - structure to describe a doorbell filter
234 * @data_node reference to device node
235 * @priv_node node in private data
236 * @priv reference to private data
237 * @filter actual doorbell filter
239 struct rio_mport_db_filter
{
240 struct list_head data_node
;
241 struct list_head priv_node
;
242 struct mport_cdev_priv
*priv
;
243 struct rio_doorbell_filter filter
;
246 static LIST_HEAD(mport_devs
);
247 static DEFINE_MUTEX(mport_devs_lock
);
249 #if (0) /* used by commented out portion of poll function : FIXME */
250 static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait
);
253 static struct class *dev_class
;
254 static dev_t dev_number
;
256 static void mport_release_mapping(struct kref
*ref
);
258 static int rio_mport_maint_rd(struct mport_cdev_priv
*priv
, void __user
*arg
,
261 struct rio_mport
*mport
= priv
->md
->mport
;
262 struct rio_mport_maint_io maint_io
;
268 if (unlikely(copy_from_user(&maint_io
, arg
, sizeof(maint_io
))))
271 if ((maint_io
.offset
% 4) ||
272 (maint_io
.length
== 0) || (maint_io
.length
% 4) ||
273 (maint_io
.length
+ maint_io
.offset
) > RIO_MAINT_SPACE_SZ
)
276 buffer
= vmalloc(maint_io
.length
);
279 length
= maint_io
.length
/sizeof(u32
);
280 offset
= maint_io
.offset
;
282 for (i
= 0; i
< length
; i
++) {
284 ret
= __rio_local_read_config_32(mport
,
287 ret
= rio_mport_read_config_32(mport
, maint_io
.rioid
,
288 maint_io
.hopcount
, offset
, &buffer
[i
]);
295 if (unlikely(copy_to_user((void __user
*)(uintptr_t)maint_io
.buffer
,
296 buffer
, maint_io
.length
)))
303 static int rio_mport_maint_wr(struct mport_cdev_priv
*priv
, void __user
*arg
,
306 struct rio_mport
*mport
= priv
->md
->mport
;
307 struct rio_mport_maint_io maint_io
;
311 int ret
= -EINVAL
, i
;
313 if (unlikely(copy_from_user(&maint_io
, arg
, sizeof(maint_io
))))
316 if ((maint_io
.offset
% 4) ||
317 (maint_io
.length
== 0) || (maint_io
.length
% 4) ||
318 (maint_io
.length
+ maint_io
.offset
) > RIO_MAINT_SPACE_SZ
)
321 buffer
= vmalloc(maint_io
.length
);
324 length
= maint_io
.length
;
326 if (unlikely(copy_from_user(buffer
,
327 (void __user
*)(uintptr_t)maint_io
.buffer
, length
))) {
332 offset
= maint_io
.offset
;
333 length
/= sizeof(u32
);
335 for (i
= 0; i
< length
; i
++) {
337 ret
= __rio_local_write_config_32(mport
,
340 ret
= rio_mport_write_config_32(mport
, maint_io
.rioid
,
356 * Inbound/outbound memory mapping functions
359 rio_mport_create_outbound_mapping(struct mport_dev
*md
, struct file
*filp
,
360 u16 rioid
, u64 raddr
, u32 size
,
363 struct rio_mport
*mport
= md
->mport
;
364 struct rio_mport_mapping
*map
;
367 rmcd_debug(OBW
, "did=%d ra=0x%llx sz=0x%x", rioid
, raddr
, size
);
369 map
= kzalloc(sizeof(*map
), GFP_KERNEL
);
373 ret
= rio_map_outb_region(mport
, rioid
, raddr
, size
, 0, paddr
);
377 map
->dir
= MAP_OUTBOUND
;
379 map
->rio_addr
= raddr
;
381 map
->phys_addr
= *paddr
;
384 kref_init(&map
->ref
);
385 list_add_tail(&map
->node
, &md
->mappings
);
393 rio_mport_get_outbound_mapping(struct mport_dev
*md
, struct file
*filp
,
394 u16 rioid
, u64 raddr
, u32 size
,
397 struct rio_mport_mapping
*map
;
400 mutex_lock(&md
->buf_mutex
);
401 list_for_each_entry(map
, &md
->mappings
, node
) {
402 if (map
->dir
!= MAP_OUTBOUND
)
404 if (rioid
== map
->rioid
&&
405 raddr
== map
->rio_addr
&& size
== map
->size
) {
406 *paddr
= map
->phys_addr
;
409 } else if (rioid
== map
->rioid
&&
410 raddr
< (map
->rio_addr
+ map
->size
- 1) &&
411 (raddr
+ size
) > map
->rio_addr
) {
417 /* If not found, create new */
419 err
= rio_mport_create_outbound_mapping(md
, filp
, rioid
, raddr
,
421 mutex_unlock(&md
->buf_mutex
);
425 static int rio_mport_obw_map(struct file
*filp
, void __user
*arg
)
427 struct mport_cdev_priv
*priv
= filp
->private_data
;
428 struct mport_dev
*data
= priv
->md
;
433 if (unlikely(copy_from_user(&map
, arg
, sizeof(map
))))
436 rmcd_debug(OBW
, "did=%d ra=0x%llx sz=0x%llx",
437 map
.rioid
, map
.rio_addr
, map
.length
);
439 ret
= rio_mport_get_outbound_mapping(data
, filp
, map
.rioid
,
440 map
.rio_addr
, map
.length
, &paddr
);
442 rmcd_error("Failed to set OBW err= %d", ret
);
448 if (unlikely(copy_to_user(arg
, &map
, sizeof(map
))))
454 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
456 * @priv: driver private data
457 * @arg: buffer handle returned by allocation routine
459 static int rio_mport_obw_free(struct file
*filp
, void __user
*arg
)
461 struct mport_cdev_priv
*priv
= filp
->private_data
;
462 struct mport_dev
*md
= priv
->md
;
464 struct rio_mport_mapping
*map
, *_map
;
466 if (!md
->mport
->ops
->unmap_outb
)
467 return -EPROTONOSUPPORT
;
469 if (copy_from_user(&handle
, arg
, sizeof(handle
)))
472 rmcd_debug(OBW
, "h=0x%llx", handle
);
474 mutex_lock(&md
->buf_mutex
);
475 list_for_each_entry_safe(map
, _map
, &md
->mappings
, node
) {
476 if (map
->dir
== MAP_OUTBOUND
&& map
->phys_addr
== handle
) {
477 if (map
->filp
== filp
) {
478 rmcd_debug(OBW
, "kref_put h=0x%llx", handle
);
480 kref_put(&map
->ref
, mport_release_mapping
);
485 mutex_unlock(&md
->buf_mutex
);
491 * maint_hdid_set() - Set the host Device ID
492 * @priv: driver private data
495 static int maint_hdid_set(struct mport_cdev_priv
*priv
, void __user
*arg
)
497 struct mport_dev
*md
= priv
->md
;
500 if (copy_from_user(&hdid
, arg
, sizeof(hdid
)))
503 md
->mport
->host_deviceid
= hdid
;
504 md
->properties
.hdid
= hdid
;
505 rio_local_set_device_id(md
->mport
, hdid
);
507 rmcd_debug(MPORT
, "Set host device Id to %d", hdid
);
513 * maint_comptag_set() - Set the host Component Tag
514 * @priv: driver private data
515 * @arg: Component Tag
517 static int maint_comptag_set(struct mport_cdev_priv
*priv
, void __user
*arg
)
519 struct mport_dev
*md
= priv
->md
;
522 if (copy_from_user(&comptag
, arg
, sizeof(comptag
)))
525 rio_local_write_config_32(md
->mport
, RIO_COMPONENT_TAG_CSR
, comptag
);
527 rmcd_debug(MPORT
, "Set host Component Tag to %d", comptag
);
532 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
534 struct mport_dma_req
{
535 struct kref refcount
;
536 struct list_head node
;
538 struct mport_cdev_priv
*priv
;
539 enum rio_transfer_sync sync
;
541 struct page
**page_list
;
542 unsigned int nr_pages
;
543 struct rio_mport_mapping
*map
;
544 struct dma_chan
*dmach
;
545 enum dma_data_direction dir
;
547 enum dma_status status
;
548 struct completion req_comp
;
551 static void mport_release_def_dma(struct kref
*dma_ref
)
553 struct mport_dev
*md
=
554 container_of(dma_ref
, struct mport_dev
, dma_ref
);
556 rmcd_debug(EXIT
, "DMA_%d", md
->dma_chan
->chan_id
);
557 rio_release_dma(md
->dma_chan
);
561 static void mport_release_dma(struct kref
*dma_ref
)
563 struct mport_cdev_priv
*priv
=
564 container_of(dma_ref
, struct mport_cdev_priv
, dma_ref
);
566 rmcd_debug(EXIT
, "DMA_%d", priv
->dmach
->chan_id
);
567 complete(&priv
->comp
);
570 static void dma_req_free(struct kref
*ref
)
572 struct mport_dma_req
*req
= container_of(ref
, struct mport_dma_req
,
574 struct mport_cdev_priv
*priv
= req
->priv
;
577 dma_unmap_sg(req
->dmach
->device
->dev
,
578 req
->sgt
.sgl
, req
->sgt
.nents
, req
->dir
);
579 sg_free_table(&req
->sgt
);
580 if (req
->page_list
) {
581 for (i
= 0; i
< req
->nr_pages
; i
++)
582 put_page(req
->page_list
[i
]);
583 kfree(req
->page_list
);
587 mutex_lock(&req
->map
->md
->buf_mutex
);
588 kref_put(&req
->map
->ref
, mport_release_mapping
);
589 mutex_unlock(&req
->map
->md
->buf_mutex
);
592 kref_put(&priv
->dma_ref
, mport_release_dma
);
597 static void dma_xfer_callback(void *param
)
599 struct mport_dma_req
*req
= (struct mport_dma_req
*)param
;
600 struct mport_cdev_priv
*priv
= req
->priv
;
602 req
->status
= dma_async_is_tx_complete(priv
->dmach
, req
->cookie
,
604 complete(&req
->req_comp
);
605 kref_put(&req
->refcount
, dma_req_free
);
609 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
611 * Returns pointer to DMA transaction descriptor allocated by DMA driver on
612 * success or ERR_PTR (and/or NULL) if failed. Caller must check returned
613 * non-NULL pointer using IS_ERR macro.
615 static struct dma_async_tx_descriptor
616 *prep_dma_xfer(struct dma_chan
*chan
, struct rio_transfer_io
*transfer
,
617 struct sg_table
*sgt
, int nents
, enum dma_transfer_direction dir
,
618 enum dma_ctrl_flags flags
)
620 struct rio_dma_data tx_data
;
622 tx_data
.sg
= sgt
->sgl
;
623 tx_data
.sg_len
= nents
;
624 tx_data
.rio_addr_u
= 0;
625 tx_data
.rio_addr
= transfer
->rio_addr
;
626 if (dir
== DMA_MEM_TO_DEV
) {
627 switch (transfer
->method
) {
628 case RIO_EXCHANGE_NWRITE
:
629 tx_data
.wr_type
= RDW_ALL_NWRITE
;
631 case RIO_EXCHANGE_NWRITE_R_ALL
:
632 tx_data
.wr_type
= RDW_ALL_NWRITE_R
;
634 case RIO_EXCHANGE_NWRITE_R
:
635 tx_data
.wr_type
= RDW_LAST_NWRITE_R
;
637 case RIO_EXCHANGE_DEFAULT
:
638 tx_data
.wr_type
= RDW_DEFAULT
;
641 return ERR_PTR(-EINVAL
);
645 return rio_dma_prep_xfer(chan
, transfer
->rioid
, &tx_data
, dir
, flags
);
648 /* Request DMA channel associated with this mport device.
649 * Try to request DMA channel for every new process that opened given
650 * mport. If a new DMA channel is not available use default channel
651 * which is the first DMA channel opened on mport device.
653 static int get_dma_channel(struct mport_cdev_priv
*priv
)
655 mutex_lock(&priv
->dma_lock
);
657 priv
->dmach
= rio_request_mport_dma(priv
->md
->mport
);
659 /* Use default DMA channel if available */
660 if (priv
->md
->dma_chan
) {
661 priv
->dmach
= priv
->md
->dma_chan
;
662 kref_get(&priv
->md
->dma_ref
);
664 rmcd_error("Failed to get DMA channel");
665 mutex_unlock(&priv
->dma_lock
);
668 } else if (!priv
->md
->dma_chan
) {
669 /* Register default DMA channel if we do not have one */
670 priv
->md
->dma_chan
= priv
->dmach
;
671 kref_init(&priv
->md
->dma_ref
);
672 rmcd_debug(DMA
, "Register DMA_chan %d as default",
673 priv
->dmach
->chan_id
);
676 kref_init(&priv
->dma_ref
);
677 init_completion(&priv
->comp
);
680 kref_get(&priv
->dma_ref
);
681 mutex_unlock(&priv
->dma_lock
);
685 static void put_dma_channel(struct mport_cdev_priv
*priv
)
687 kref_put(&priv
->dma_ref
, mport_release_dma
);
691 * DMA transfer functions
693 static int do_dma_request(struct mport_dma_req
*req
,
694 struct rio_transfer_io
*xfer
,
695 enum rio_transfer_sync sync
, int nents
)
697 struct mport_cdev_priv
*priv
;
698 struct sg_table
*sgt
;
699 struct dma_chan
*chan
;
700 struct dma_async_tx_descriptor
*tx
;
702 unsigned long tmo
= msecs_to_jiffies(dma_timeout
);
703 enum dma_transfer_direction dir
;
711 dir
= (req
->dir
== DMA_FROM_DEVICE
) ? DMA_DEV_TO_MEM
: DMA_MEM_TO_DEV
;
713 rmcd_debug(DMA
, "%s(%d) uses %s for DMA_%s",
714 current
->comm
, task_pid_nr(current
),
715 dev_name(&chan
->dev
->device
),
716 (dir
== DMA_DEV_TO_MEM
)?"READ":"WRITE");
718 /* Initialize DMA transaction request */
719 tx
= prep_dma_xfer(chan
, xfer
, sgt
, nents
, dir
,
720 DMA_CTRL_ACK
| DMA_PREP_INTERRUPT
);
723 rmcd_debug(DMA
, "prep error for %s A:0x%llx L:0x%llx",
724 (dir
== DMA_DEV_TO_MEM
)?"READ":"WRITE",
725 xfer
->rio_addr
, xfer
->length
);
728 } else if (IS_ERR(tx
)) {
730 rmcd_debug(DMA
, "prep error %d for %s A:0x%llx L:0x%llx", ret
,
731 (dir
== DMA_DEV_TO_MEM
)?"READ":"WRITE",
732 xfer
->rio_addr
, xfer
->length
);
736 tx
->callback
= dma_xfer_callback
;
737 tx
->callback_param
= req
;
739 req
->status
= DMA_IN_PROGRESS
;
740 kref_get(&req
->refcount
);
742 cookie
= dmaengine_submit(tx
);
743 req
->cookie
= cookie
;
745 rmcd_debug(DMA
, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current
),
746 (dir
== DMA_DEV_TO_MEM
)?"READ":"WRITE", cookie
);
748 if (dma_submit_error(cookie
)) {
749 rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
750 cookie
, xfer
->rio_addr
, xfer
->length
);
751 kref_put(&req
->refcount
, dma_req_free
);
756 dma_async_issue_pending(chan
);
758 if (sync
== RIO_TRANSFER_ASYNC
) {
759 spin_lock(&priv
->req_lock
);
760 list_add_tail(&req
->node
, &priv
->async_list
);
761 spin_unlock(&priv
->req_lock
);
763 } else if (sync
== RIO_TRANSFER_FAF
)
766 wret
= wait_for_completion_interruptible_timeout(&req
->req_comp
, tmo
);
769 /* Timeout on wait occurred */
770 rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
771 current
->comm
, task_pid_nr(current
),
772 (dir
== DMA_DEV_TO_MEM
)?"READ":"WRITE", cookie
);
774 } else if (wret
== -ERESTARTSYS
) {
775 /* Wait_for_completion was interrupted by a signal but DMA may
778 rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
779 current
->comm
, task_pid_nr(current
),
780 (dir
== DMA_DEV_TO_MEM
)?"READ":"WRITE", cookie
);
784 if (req
->status
!= DMA_COMPLETE
) {
785 /* DMA transaction completion was signaled with error */
786 rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
787 current
->comm
, task_pid_nr(current
),
788 (dir
== DMA_DEV_TO_MEM
)?"READ":"WRITE",
789 cookie
, req
->status
, ret
);
798 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
799 * the remote RapidIO device
800 * @filp: file pointer associated with the call
801 * @transfer_mode: DMA transfer mode
802 * @sync: synchronization mode
803 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
804 * DMA_DEV_TO_MEM = read)
805 * @xfer: data transfer descriptor structure
808 rio_dma_transfer(struct file
*filp
, u32 transfer_mode
,
809 enum rio_transfer_sync sync
, enum dma_data_direction dir
,
810 struct rio_transfer_io
*xfer
)
812 struct mport_cdev_priv
*priv
= filp
->private_data
;
813 unsigned long nr_pages
= 0;
814 struct page
**page_list
= NULL
;
815 struct mport_dma_req
*req
;
816 struct mport_dev
*md
= priv
->md
;
817 struct dma_chan
*chan
;
821 if (xfer
->length
== 0)
823 req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
827 ret
= get_dma_channel(priv
);
834 kref_init(&req
->refcount
);
835 init_completion(&req
->req_comp
);
843 * If parameter loc_addr != NULL, we are transferring data from/to
844 * data buffer allocated in user-space: lock in memory user-space
845 * buffer pages and build an SG table for DMA transfer request
847 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
848 * used for DMA data transfers: build single entry SG table using
849 * offset within the internal buffer specified by handle parameter.
851 if (xfer
->loc_addr
) {
855 offset
= lower_32_bits(offset_in_page(xfer
->loc_addr
));
856 nr_pages
= PAGE_ALIGN(xfer
->length
+ offset
) >> PAGE_SHIFT
;
858 page_list
= kmalloc_array(nr_pages
,
859 sizeof(*page_list
), GFP_KERNEL
);
860 if (page_list
== NULL
) {
865 pinned
= get_user_pages_fast(
866 (unsigned long)xfer
->loc_addr
& PAGE_MASK
,
868 dir
== DMA_FROM_DEVICE
? FOLL_WRITE
: 0,
871 if (pinned
!= nr_pages
) {
873 rmcd_error("get_user_pages_unlocked err=%ld",
877 rmcd_error("pinned %ld out of %ld pages",
881 * Set nr_pages up to mean "how many pages to unpin, in
888 ret
= sg_alloc_table_from_pages(&req
->sgt
, page_list
, nr_pages
,
889 offset
, xfer
->length
, GFP_KERNEL
);
891 rmcd_error("sg_alloc_table failed with err=%d", ret
);
895 req
->page_list
= page_list
;
896 req
->nr_pages
= nr_pages
;
899 struct rio_mport_mapping
*map
;
901 baddr
= (dma_addr_t
)xfer
->handle
;
903 mutex_lock(&md
->buf_mutex
);
904 list_for_each_entry(map
, &md
->mappings
, node
) {
905 if (baddr
>= map
->phys_addr
&&
906 baddr
< (map
->phys_addr
+ map
->size
)) {
912 mutex_unlock(&md
->buf_mutex
);
914 if (req
->map
== NULL
) {
919 if (xfer
->length
+ xfer
->offset
> map
->size
) {
924 ret
= sg_alloc_table(&req
->sgt
, 1, GFP_KERNEL
);
926 rmcd_error("sg_alloc_table failed for internal buf");
930 sg_set_buf(req
->sgt
.sgl
,
931 map
->virt_addr
+ (baddr
- map
->phys_addr
) +
932 xfer
->offset
, xfer
->length
);
935 nents
= dma_map_sg(chan
->device
->dev
,
936 req
->sgt
.sgl
, req
->sgt
.nents
, dir
);
938 rmcd_error("Failed to map SG list");
943 ret
= do_dma_request(req
, xfer
, sync
, nents
);
946 if (sync
== RIO_TRANSFER_ASYNC
)
947 return ret
; /* return ASYNC cookie */
949 rmcd_debug(DMA
, "do_dma_request failed with err=%d", ret
);
953 if (!req
->page_list
) {
954 for (i
= 0; i
< nr_pages
; i
++)
955 put_page(page_list
[i
]);
959 kref_put(&req
->refcount
, dma_req_free
);
963 static int rio_mport_transfer_ioctl(struct file
*filp
, void __user
*arg
)
965 struct mport_cdev_priv
*priv
= filp
->private_data
;
966 struct rio_transaction transaction
;
967 struct rio_transfer_io
*transfer
;
968 enum dma_data_direction dir
;
971 if (unlikely(copy_from_user(&transaction
, arg
, sizeof(transaction
))))
974 if (transaction
.count
!= 1) /* only single transfer for now */
977 if ((transaction
.transfer_mode
&
978 priv
->md
->properties
.transfer_mode
) == 0)
981 transfer
= vmalloc(array_size(sizeof(*transfer
), transaction
.count
));
985 if (unlikely(copy_from_user(transfer
,
986 (void __user
*)(uintptr_t)transaction
.block
,
987 transaction
.count
* sizeof(*transfer
)))) {
992 dir
= (transaction
.dir
== RIO_TRANSFER_DIR_READ
) ?
993 DMA_FROM_DEVICE
: DMA_TO_DEVICE
;
994 for (i
= 0; i
< transaction
.count
&& ret
== 0; i
++)
995 ret
= rio_dma_transfer(filp
, transaction
.transfer_mode
,
996 transaction
.sync
, dir
, &transfer
[i
]);
998 if (unlikely(copy_to_user((void __user
*)(uintptr_t)transaction
.block
,
1000 transaction
.count
* sizeof(*transfer
))))
1009 static int rio_mport_wait_for_async_dma(struct file
*filp
, void __user
*arg
)
1011 struct mport_cdev_priv
*priv
;
1012 struct rio_async_tx_wait w_param
;
1013 struct mport_dma_req
*req
;
1014 dma_cookie_t cookie
;
1020 priv
= (struct mport_cdev_priv
*)filp
->private_data
;
1022 if (unlikely(copy_from_user(&w_param
, arg
, sizeof(w_param
))))
1025 cookie
= w_param
.token
;
1026 if (w_param
.timeout
)
1027 tmo
= msecs_to_jiffies(w_param
.timeout
);
1028 else /* Use default DMA timeout */
1029 tmo
= msecs_to_jiffies(dma_timeout
);
1031 spin_lock(&priv
->req_lock
);
1032 list_for_each_entry(req
, &priv
->async_list
, node
) {
1033 if (req
->cookie
== cookie
) {
1034 list_del(&req
->node
);
1039 spin_unlock(&priv
->req_lock
);
1044 wret
= wait_for_completion_interruptible_timeout(&req
->req_comp
, tmo
);
1047 /* Timeout on wait occurred */
1048 rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
1049 current
->comm
, task_pid_nr(current
),
1050 (req
->dir
== DMA_FROM_DEVICE
)?"READ":"WRITE");
1053 } else if (wret
== -ERESTARTSYS
) {
1054 /* Wait_for_completion was interrupted by a signal but DMA may
1055 * be still in progress
1057 rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
1058 current
->comm
, task_pid_nr(current
),
1059 (req
->dir
== DMA_FROM_DEVICE
)?"READ":"WRITE");
1064 if (req
->status
!= DMA_COMPLETE
) {
1065 /* DMA transaction completion signaled with transfer error */
1066 rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
1067 current
->comm
, task_pid_nr(current
),
1068 (req
->dir
== DMA_FROM_DEVICE
)?"READ":"WRITE",
1074 if (req
->status
!= DMA_IN_PROGRESS
&& req
->status
!= DMA_PAUSED
)
1075 kref_put(&req
->refcount
, dma_req_free
);
1080 /* Return request back into async queue */
1081 spin_lock(&priv
->req_lock
);
1082 list_add_tail(&req
->node
, &priv
->async_list
);
1083 spin_unlock(&priv
->req_lock
);
1087 static int rio_mport_create_dma_mapping(struct mport_dev
*md
, struct file
*filp
,
1088 u64 size
, struct rio_mport_mapping
**mapping
)
1090 struct rio_mport_mapping
*map
;
1092 map
= kzalloc(sizeof(*map
), GFP_KERNEL
);
1096 map
->virt_addr
= dma_alloc_coherent(md
->mport
->dev
.parent
, size
,
1097 &map
->phys_addr
, GFP_KERNEL
);
1098 if (map
->virt_addr
== NULL
) {
1107 kref_init(&map
->ref
);
1108 mutex_lock(&md
->buf_mutex
);
1109 list_add_tail(&map
->node
, &md
->mappings
);
1110 mutex_unlock(&md
->buf_mutex
);
1116 static int rio_mport_alloc_dma(struct file
*filp
, void __user
*arg
)
1118 struct mport_cdev_priv
*priv
= filp
->private_data
;
1119 struct mport_dev
*md
= priv
->md
;
1120 struct rio_dma_mem map
;
1121 struct rio_mport_mapping
*mapping
= NULL
;
1124 if (unlikely(copy_from_user(&map
, arg
, sizeof(map
))))
1127 ret
= rio_mport_create_dma_mapping(md
, filp
, map
.length
, &mapping
);
1131 map
.dma_handle
= mapping
->phys_addr
;
1133 if (unlikely(copy_to_user(arg
, &map
, sizeof(map
)))) {
1134 mutex_lock(&md
->buf_mutex
);
1135 kref_put(&mapping
->ref
, mport_release_mapping
);
1136 mutex_unlock(&md
->buf_mutex
);
1143 static int rio_mport_free_dma(struct file
*filp
, void __user
*arg
)
1145 struct mport_cdev_priv
*priv
= filp
->private_data
;
1146 struct mport_dev
*md
= priv
->md
;
1149 struct rio_mport_mapping
*map
, *_map
;
1151 if (copy_from_user(&handle
, arg
, sizeof(handle
)))
1153 rmcd_debug(EXIT
, "filp=%p", filp
);
1155 mutex_lock(&md
->buf_mutex
);
1156 list_for_each_entry_safe(map
, _map
, &md
->mappings
, node
) {
1157 if (map
->dir
== MAP_DMA
&& map
->phys_addr
== handle
&&
1158 map
->filp
== filp
) {
1159 kref_put(&map
->ref
, mport_release_mapping
);
1164 mutex_unlock(&md
->buf_mutex
);
1166 if (ret
== -EFAULT
) {
1167 rmcd_debug(DMA
, "ERR no matching mapping");
1174 static int rio_mport_transfer_ioctl(struct file
*filp
, void *arg
)
1179 static int rio_mport_wait_for_async_dma(struct file
*filp
, void __user
*arg
)
1184 static int rio_mport_alloc_dma(struct file
*filp
, void __user
*arg
)
1189 static int rio_mport_free_dma(struct file
*filp
, void __user
*arg
)
1193 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1196 * Inbound/outbound memory mapping functions
1200 rio_mport_create_inbound_mapping(struct mport_dev
*md
, struct file
*filp
,
1201 u64 raddr
, u64 size
,
1202 struct rio_mport_mapping
**mapping
)
1204 struct rio_mport
*mport
= md
->mport
;
1205 struct rio_mport_mapping
*map
;
1208 /* rio_map_inb_region() accepts u32 size */
1209 if (size
> 0xffffffff)
1212 map
= kzalloc(sizeof(*map
), GFP_KERNEL
);
1216 map
->virt_addr
= dma_alloc_coherent(mport
->dev
.parent
, size
,
1217 &map
->phys_addr
, GFP_KERNEL
);
1218 if (map
->virt_addr
== NULL
) {
1223 if (raddr
== RIO_MAP_ANY_ADDR
)
1224 raddr
= map
->phys_addr
;
1225 ret
= rio_map_inb_region(mport
, map
->phys_addr
, raddr
, (u32
)size
, 0);
1229 map
->dir
= MAP_INBOUND
;
1230 map
->rio_addr
= raddr
;
1234 kref_init(&map
->ref
);
1235 mutex_lock(&md
->buf_mutex
);
1236 list_add_tail(&map
->node
, &md
->mappings
);
1237 mutex_unlock(&md
->buf_mutex
);
1242 dma_free_coherent(mport
->dev
.parent
, size
,
1243 map
->virt_addr
, map
->phys_addr
);
1250 rio_mport_get_inbound_mapping(struct mport_dev
*md
, struct file
*filp
,
1251 u64 raddr
, u64 size
,
1252 struct rio_mport_mapping
**mapping
)
1254 struct rio_mport_mapping
*map
;
1257 if (raddr
== RIO_MAP_ANY_ADDR
)
1260 mutex_lock(&md
->buf_mutex
);
1261 list_for_each_entry(map
, &md
->mappings
, node
) {
1262 if (map
->dir
!= MAP_INBOUND
)
1264 if (raddr
== map
->rio_addr
&& size
== map
->size
) {
1265 /* allow exact match only */
1269 } else if (raddr
< (map
->rio_addr
+ map
->size
- 1) &&
1270 (raddr
+ size
) > map
->rio_addr
) {
1275 mutex_unlock(&md
->buf_mutex
);
1280 /* not found, create new */
1281 return rio_mport_create_inbound_mapping(md
, filp
, raddr
, size
, mapping
);
1284 static int rio_mport_map_inbound(struct file
*filp
, void __user
*arg
)
1286 struct mport_cdev_priv
*priv
= filp
->private_data
;
1287 struct mport_dev
*md
= priv
->md
;
1288 struct rio_mmap map
;
1289 struct rio_mport_mapping
*mapping
= NULL
;
1292 if (!md
->mport
->ops
->map_inb
)
1293 return -EPROTONOSUPPORT
;
1294 if (unlikely(copy_from_user(&map
, arg
, sizeof(map
))))
1297 rmcd_debug(IBW
, "%s filp=%p", dev_name(&priv
->md
->dev
), filp
);
1299 ret
= rio_mport_get_inbound_mapping(md
, filp
, map
.rio_addr
,
1300 map
.length
, &mapping
);
1304 map
.handle
= mapping
->phys_addr
;
1305 map
.rio_addr
= mapping
->rio_addr
;
1307 if (unlikely(copy_to_user(arg
, &map
, sizeof(map
)))) {
1308 /* Delete mapping if it was created by this request */
1309 if (ret
== 0 && mapping
->filp
== filp
) {
1310 mutex_lock(&md
->buf_mutex
);
1311 kref_put(&mapping
->ref
, mport_release_mapping
);
1312 mutex_unlock(&md
->buf_mutex
);
1321 * rio_mport_inbound_free() - unmap from RapidIO address space and free
1322 * previously allocated inbound DMA coherent buffer
1323 * @priv: driver private data
1324 * @arg: buffer handle returned by allocation routine
1326 static int rio_mport_inbound_free(struct file
*filp
, void __user
*arg
)
1328 struct mport_cdev_priv
*priv
= filp
->private_data
;
1329 struct mport_dev
*md
= priv
->md
;
1331 struct rio_mport_mapping
*map
, *_map
;
1333 rmcd_debug(IBW
, "%s filp=%p", dev_name(&priv
->md
->dev
), filp
);
1335 if (!md
->mport
->ops
->unmap_inb
)
1336 return -EPROTONOSUPPORT
;
1338 if (copy_from_user(&handle
, arg
, sizeof(handle
)))
1341 mutex_lock(&md
->buf_mutex
);
1342 list_for_each_entry_safe(map
, _map
, &md
->mappings
, node
) {
1343 if (map
->dir
== MAP_INBOUND
&& map
->phys_addr
== handle
) {
1344 if (map
->filp
== filp
) {
1346 kref_put(&map
->ref
, mport_release_mapping
);
1351 mutex_unlock(&md
->buf_mutex
);
1357 * maint_port_idx_get() - Get the port index of the mport instance
1358 * @priv: driver private data
1361 static int maint_port_idx_get(struct mport_cdev_priv
*priv
, void __user
*arg
)
1363 struct mport_dev
*md
= priv
->md
;
1364 u32 port_idx
= md
->mport
->index
;
1366 rmcd_debug(MPORT
, "port_index=%d", port_idx
);
1368 if (copy_to_user(arg
, &port_idx
, sizeof(port_idx
)))
1374 static int rio_mport_add_event(struct mport_cdev_priv
*priv
,
1375 struct rio_event
*event
)
1379 if (!(priv
->event_mask
& event
->header
))
1382 spin_lock(&priv
->fifo_lock
);
1383 overflow
= kfifo_avail(&priv
->event_fifo
) < sizeof(*event
)
1384 || kfifo_in(&priv
->event_fifo
, (unsigned char *)event
,
1385 sizeof(*event
)) != sizeof(*event
);
1386 spin_unlock(&priv
->fifo_lock
);
1388 wake_up_interruptible(&priv
->event_rx_wait
);
1391 dev_warn(&priv
->md
->dev
, DRV_NAME
": event fifo overflow\n");
1398 static void rio_mport_doorbell_handler(struct rio_mport
*mport
, void *dev_id
,
1399 u16 src
, u16 dst
, u16 info
)
1401 struct mport_dev
*data
= dev_id
;
1402 struct mport_cdev_priv
*priv
;
1403 struct rio_mport_db_filter
*db_filter
;
1404 struct rio_event event
;
1407 event
.header
= RIO_DOORBELL
;
1408 event
.u
.doorbell
.rioid
= src
;
1409 event
.u
.doorbell
.payload
= info
;
1412 spin_lock(&data
->db_lock
);
1413 list_for_each_entry(db_filter
, &data
->doorbells
, data_node
) {
1414 if (((db_filter
->filter
.rioid
== RIO_INVALID_DESTID
||
1415 db_filter
->filter
.rioid
== src
)) &&
1416 info
>= db_filter
->filter
.low
&&
1417 info
<= db_filter
->filter
.high
) {
1418 priv
= db_filter
->priv
;
1419 rio_mport_add_event(priv
, &event
);
1423 spin_unlock(&data
->db_lock
);
1426 dev_warn(&data
->dev
,
1427 "%s: spurious DB received from 0x%x, info=0x%04x\n",
1428 __func__
, src
, info
);
1431 static int rio_mport_add_db_filter(struct mport_cdev_priv
*priv
,
1434 struct mport_dev
*md
= priv
->md
;
1435 struct rio_mport_db_filter
*db_filter
;
1436 struct rio_doorbell_filter filter
;
1437 unsigned long flags
;
1440 if (copy_from_user(&filter
, arg
, sizeof(filter
)))
1443 if (filter
.low
> filter
.high
)
1446 ret
= rio_request_inb_dbell(md
->mport
, md
, filter
.low
, filter
.high
,
1447 rio_mport_doorbell_handler
);
1449 rmcd_error("%s failed to register IBDB, err=%d",
1450 dev_name(&md
->dev
), ret
);
1454 db_filter
= kzalloc(sizeof(*db_filter
), GFP_KERNEL
);
1455 if (db_filter
== NULL
) {
1456 rio_release_inb_dbell(md
->mport
, filter
.low
, filter
.high
);
1460 db_filter
->filter
= filter
;
1461 db_filter
->priv
= priv
;
1462 spin_lock_irqsave(&md
->db_lock
, flags
);
1463 list_add_tail(&db_filter
->priv_node
, &priv
->db_filters
);
1464 list_add_tail(&db_filter
->data_node
, &md
->doorbells
);
1465 spin_unlock_irqrestore(&md
->db_lock
, flags
);
1470 static void rio_mport_delete_db_filter(struct rio_mport_db_filter
*db_filter
)
1472 list_del(&db_filter
->data_node
);
1473 list_del(&db_filter
->priv_node
);
1477 static int rio_mport_remove_db_filter(struct mport_cdev_priv
*priv
,
1480 struct rio_mport_db_filter
*db_filter
;
1481 struct rio_doorbell_filter filter
;
1482 unsigned long flags
;
1485 if (copy_from_user(&filter
, arg
, sizeof(filter
)))
1488 if (filter
.low
> filter
.high
)
1491 spin_lock_irqsave(&priv
->md
->db_lock
, flags
);
1492 list_for_each_entry(db_filter
, &priv
->db_filters
, priv_node
) {
1493 if (db_filter
->filter
.rioid
== filter
.rioid
&&
1494 db_filter
->filter
.low
== filter
.low
&&
1495 db_filter
->filter
.high
== filter
.high
) {
1496 rio_mport_delete_db_filter(db_filter
);
1501 spin_unlock_irqrestore(&priv
->md
->db_lock
, flags
);
1504 rio_release_inb_dbell(priv
->md
->mport
, filter
.low
, filter
.high
);
1509 static int rio_mport_match_pw(union rio_pw_msg
*msg
,
1510 struct rio_pw_filter
*filter
)
1512 if ((msg
->em
.comptag
& filter
->mask
) < filter
->low
||
1513 (msg
->em
.comptag
& filter
->mask
) > filter
->high
)
1518 static int rio_mport_pw_handler(struct rio_mport
*mport
, void *context
,
1519 union rio_pw_msg
*msg
, int step
)
1521 struct mport_dev
*md
= context
;
1522 struct mport_cdev_priv
*priv
;
1523 struct rio_mport_pw_filter
*pw_filter
;
1524 struct rio_event event
;
1527 event
.header
= RIO_PORTWRITE
;
1528 memcpy(event
.u
.portwrite
.payload
, msg
->raw
, RIO_PW_MSG_SIZE
);
1531 spin_lock(&md
->pw_lock
);
1532 list_for_each_entry(pw_filter
, &md
->portwrites
, md_node
) {
1533 if (rio_mport_match_pw(msg
, &pw_filter
->filter
)) {
1534 priv
= pw_filter
->priv
;
1535 rio_mport_add_event(priv
, &event
);
1539 spin_unlock(&md
->pw_lock
);
1542 printk_ratelimited(KERN_WARNING DRV_NAME
1543 ": mport%d received spurious PW from 0x%08x\n",
1544 mport
->id
, msg
->em
.comptag
);
1550 static int rio_mport_add_pw_filter(struct mport_cdev_priv
*priv
,
1553 struct mport_dev
*md
= priv
->md
;
1554 struct rio_mport_pw_filter
*pw_filter
;
1555 struct rio_pw_filter filter
;
1556 unsigned long flags
;
1559 if (copy_from_user(&filter
, arg
, sizeof(filter
)))
1562 pw_filter
= kzalloc(sizeof(*pw_filter
), GFP_KERNEL
);
1563 if (pw_filter
== NULL
)
1566 pw_filter
->filter
= filter
;
1567 pw_filter
->priv
= priv
;
1568 spin_lock_irqsave(&md
->pw_lock
, flags
);
1569 if (list_empty(&md
->portwrites
))
1571 list_add_tail(&pw_filter
->priv_node
, &priv
->pw_filters
);
1572 list_add_tail(&pw_filter
->md_node
, &md
->portwrites
);
1573 spin_unlock_irqrestore(&md
->pw_lock
, flags
);
1578 ret
= rio_add_mport_pw_handler(md
->mport
, md
,
1579 rio_mport_pw_handler
);
1582 "%s: failed to add IB_PW handler, err=%d\n",
1586 rio_pw_enable(md
->mport
, 1);
1592 static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter
*pw_filter
)
1594 list_del(&pw_filter
->md_node
);
1595 list_del(&pw_filter
->priv_node
);
1599 static int rio_mport_match_pw_filter(struct rio_pw_filter
*a
,
1600 struct rio_pw_filter
*b
)
1602 if ((a
->mask
== b
->mask
) && (a
->low
== b
->low
) && (a
->high
== b
->high
))
1607 static int rio_mport_remove_pw_filter(struct mport_cdev_priv
*priv
,
1610 struct mport_dev
*md
= priv
->md
;
1611 struct rio_mport_pw_filter
*pw_filter
;
1612 struct rio_pw_filter filter
;
1613 unsigned long flags
;
1617 if (copy_from_user(&filter
, arg
, sizeof(filter
)))
1620 spin_lock_irqsave(&md
->pw_lock
, flags
);
1621 list_for_each_entry(pw_filter
, &priv
->pw_filters
, priv_node
) {
1622 if (rio_mport_match_pw_filter(&pw_filter
->filter
, &filter
)) {
1623 rio_mport_delete_pw_filter(pw_filter
);
1629 if (list_empty(&md
->portwrites
))
1631 spin_unlock_irqrestore(&md
->pw_lock
, flags
);
1634 rio_del_mport_pw_handler(md
->mport
, priv
->md
,
1635 rio_mport_pw_handler
);
1636 rio_pw_enable(md
->mport
, 0);
1643 * rio_release_dev - release routine for kernel RIO device object
1644 * @dev: kernel device object associated with a RIO device structure
1646 * Frees a RIO device struct associated a RIO device struct.
1647 * The RIO device struct is freed.
1649 static void rio_release_dev(struct device
*dev
)
1651 struct rio_dev
*rdev
;
1653 rdev
= to_rio_dev(dev
);
1654 pr_info(DRV_PREFIX
"%s: %s\n", __func__
, rio_name(rdev
));
1659 static void rio_release_net(struct device
*dev
)
1661 struct rio_net
*net
;
1663 net
= to_rio_net(dev
);
1664 rmcd_debug(RDEV
, "net_%d", net
->id
);
1670 * rio_mport_add_riodev - creates a kernel RIO device object
1672 * Allocates a RIO device data structure and initializes required fields based
1673 * on device's configuration space contents.
1674 * If the device has switch capabilities, then a switch specific portion is
1675 * allocated and configured.
1677 static int rio_mport_add_riodev(struct mport_cdev_priv
*priv
,
1680 struct mport_dev
*md
= priv
->md
;
1681 struct rio_rdev_info dev_info
;
1682 struct rio_dev
*rdev
;
1683 struct rio_switch
*rswitch
= NULL
;
1684 struct rio_mport
*mport
;
1692 if (copy_from_user(&dev_info
, arg
, sizeof(dev_info
)))
1694 dev_info
.name
[sizeof(dev_info
.name
) - 1] = '\0';
1696 rmcd_debug(RDEV
, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info
.name
,
1697 dev_info
.comptag
, dev_info
.destid
, dev_info
.hopcount
);
1699 if (bus_find_device_by_name(&rio_bus_type
, NULL
, dev_info
.name
)) {
1700 rmcd_debug(RDEV
, "device %s already exists", dev_info
.name
);
1704 size
= sizeof(*rdev
);
1706 destid
= dev_info
.destid
;
1707 hopcount
= dev_info
.hopcount
;
1709 if (rio_mport_read_config_32(mport
, destid
, hopcount
,
1710 RIO_PEF_CAR
, &rval
))
1713 if (rval
& RIO_PEF_SWITCH
) {
1714 rio_mport_read_config_32(mport
, destid
, hopcount
,
1715 RIO_SWP_INFO_CAR
, &swpinfo
);
1716 size
+= (RIO_GET_TOTAL_PORTS(swpinfo
) *
1717 sizeof(rswitch
->nextdev
[0])) + sizeof(*rswitch
);
1720 rdev
= kzalloc(size
, GFP_KERNEL
);
1724 if (mport
->net
== NULL
) {
1725 struct rio_net
*net
;
1727 net
= rio_alloc_net(mport
);
1730 rmcd_debug(RDEV
, "failed to allocate net object");
1734 net
->id
= mport
->id
;
1736 dev_set_name(&net
->dev
, "rnet_%d", net
->id
);
1737 net
->dev
.parent
= &mport
->dev
;
1738 net
->dev
.release
= rio_release_net
;
1739 err
= rio_add_net(net
);
1741 rmcd_debug(RDEV
, "failed to register net, err=%d", err
);
1747 rdev
->net
= mport
->net
;
1749 rdev
->swpinfo
= swpinfo
;
1750 rio_mport_read_config_32(mport
, destid
, hopcount
,
1751 RIO_DEV_ID_CAR
, &rval
);
1752 rdev
->did
= rval
>> 16;
1753 rdev
->vid
= rval
& 0xffff;
1754 rio_mport_read_config_32(mport
, destid
, hopcount
, RIO_DEV_INFO_CAR
,
1756 rio_mport_read_config_32(mport
, destid
, hopcount
, RIO_ASM_ID_CAR
,
1758 rdev
->asm_did
= rval
>> 16;
1759 rdev
->asm_vid
= rval
& 0xffff;
1760 rio_mport_read_config_32(mport
, destid
, hopcount
, RIO_ASM_INFO_CAR
,
1762 rdev
->asm_rev
= rval
>> 16;
1764 if (rdev
->pef
& RIO_PEF_EXT_FEATURES
) {
1765 rdev
->efptr
= rval
& 0xffff;
1766 rdev
->phys_efptr
= rio_mport_get_physefb(mport
, 0, destid
,
1767 hopcount
, &rdev
->phys_rmap
);
1769 rdev
->em_efptr
= rio_mport_get_feature(mport
, 0, destid
,
1770 hopcount
, RIO_EFB_ERR_MGMNT
);
1773 rio_mport_read_config_32(mport
, destid
, hopcount
, RIO_SRC_OPS_CAR
,
1775 rio_mport_read_config_32(mport
, destid
, hopcount
, RIO_DST_OPS_CAR
,
1778 rdev
->comp_tag
= dev_info
.comptag
;
1779 rdev
->destid
= destid
;
1780 /* hopcount is stored as specified by a caller, regardles of EP or SW */
1781 rdev
->hopcount
= hopcount
;
1783 if (rdev
->pef
& RIO_PEF_SWITCH
) {
1784 rswitch
= rdev
->rswitch
;
1785 rswitch
->route_table
= NULL
;
1788 if (strlen(dev_info
.name
))
1789 dev_set_name(&rdev
->dev
, "%s", dev_info
.name
);
1790 else if (rdev
->pef
& RIO_PEF_SWITCH
)
1791 dev_set_name(&rdev
->dev
, "%02x:s:%04x", mport
->id
,
1792 rdev
->comp_tag
& RIO_CTAG_UDEVID
);
1794 dev_set_name(&rdev
->dev
, "%02x:e:%04x", mport
->id
,
1795 rdev
->comp_tag
& RIO_CTAG_UDEVID
);
1797 INIT_LIST_HEAD(&rdev
->net_list
);
1798 rdev
->dev
.parent
= &mport
->net
->dev
;
1799 rio_attach_device(rdev
);
1800 rdev
->dev
.release
= rio_release_dev
;
1802 if (rdev
->dst_ops
& RIO_DST_OPS_DOORBELL
)
1803 rio_init_dbell_res(&rdev
->riores
[RIO_DOORBELL_RESOURCE
],
1805 err
= rio_add_device(rdev
);
1816 static int rio_mport_del_riodev(struct mport_cdev_priv
*priv
, void __user
*arg
)
1818 struct rio_rdev_info dev_info
;
1819 struct rio_dev
*rdev
= NULL
;
1821 struct rio_mport
*mport
;
1822 struct rio_net
*net
;
1824 if (copy_from_user(&dev_info
, arg
, sizeof(dev_info
)))
1826 dev_info
.name
[sizeof(dev_info
.name
) - 1] = '\0';
1828 mport
= priv
->md
->mport
;
1830 /* If device name is specified, removal by name has priority */
1831 if (strlen(dev_info
.name
)) {
1832 dev
= bus_find_device_by_name(&rio_bus_type
, NULL
,
1835 rdev
= to_rio_dev(dev
);
1838 rdev
= rio_get_comptag(dev_info
.comptag
, rdev
);
1839 if (rdev
&& rdev
->dev
.parent
== &mport
->net
->dev
&&
1840 rdev
->destid
== dev_info
.destid
&&
1841 rdev
->hopcount
== dev_info
.hopcount
)
1848 "device name:%s ct:0x%x did:0x%x hc:0x%x not found",
1849 dev_info
.name
, dev_info
.comptag
, dev_info
.destid
,
1856 rio_del_device(rdev
, RIO_DEVICE_SHUTDOWN
);
1858 if (list_empty(&net
->devices
)) {
1867 * Mport cdev management
1871 * mport_cdev_open() - Open character device (mport)
1873 static int mport_cdev_open(struct inode
*inode
, struct file
*filp
)
1876 int minor
= iminor(inode
);
1877 struct mport_dev
*chdev
;
1878 struct mport_cdev_priv
*priv
;
1880 /* Test for valid device */
1881 if (minor
>= RIO_MAX_MPORTS
) {
1882 rmcd_error("Invalid minor device number");
1886 chdev
= container_of(inode
->i_cdev
, struct mport_dev
, cdev
);
1888 rmcd_debug(INIT
, "%s filp=%p", dev_name(&chdev
->dev
), filp
);
1890 if (atomic_read(&chdev
->active
) == 0)
1893 get_device(&chdev
->dev
);
1895 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
1897 put_device(&chdev
->dev
);
1903 mutex_lock(&chdev
->file_mutex
);
1904 list_add_tail(&priv
->list
, &chdev
->file_list
);
1905 mutex_unlock(&chdev
->file_mutex
);
1907 INIT_LIST_HEAD(&priv
->db_filters
);
1908 INIT_LIST_HEAD(&priv
->pw_filters
);
1909 spin_lock_init(&priv
->fifo_lock
);
1910 init_waitqueue_head(&priv
->event_rx_wait
);
1911 ret
= kfifo_alloc(&priv
->event_fifo
,
1912 sizeof(struct rio_event
) * MPORT_EVENT_DEPTH
,
1915 dev_err(&chdev
->dev
, DRV_NAME
": kfifo_alloc failed\n");
1920 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1921 INIT_LIST_HEAD(&priv
->async_list
);
1922 spin_lock_init(&priv
->req_lock
);
1923 mutex_init(&priv
->dma_lock
);
1926 filp
->private_data
= priv
;
1934 static int mport_cdev_fasync(int fd
, struct file
*filp
, int mode
)
1936 struct mport_cdev_priv
*priv
= filp
->private_data
;
1938 return fasync_helper(fd
, filp
, mode
, &priv
->async_queue
);
1941 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1942 static void mport_cdev_release_dma(struct file
*filp
)
1944 struct mport_cdev_priv
*priv
= filp
->private_data
;
1945 struct mport_dev
*md
;
1946 struct mport_dma_req
*req
, *req_next
;
1947 unsigned long tmo
= msecs_to_jiffies(dma_timeout
);
1951 rmcd_debug(EXIT
, "from filp=%p %s(%d)",
1952 filp
, current
->comm
, task_pid_nr(current
));
1955 rmcd_debug(EXIT
, "No DMA channel for filp=%p", filp
);
1961 spin_lock(&priv
->req_lock
);
1962 if (!list_empty(&priv
->async_list
)) {
1963 rmcd_debug(EXIT
, "async list not empty filp=%p %s(%d)",
1964 filp
, current
->comm
, task_pid_nr(current
));
1965 list_splice_init(&priv
->async_list
, &list
);
1967 spin_unlock(&priv
->req_lock
);
1969 if (!list_empty(&list
)) {
1970 rmcd_debug(EXIT
, "temp list not empty");
1971 list_for_each_entry_safe(req
, req_next
, &list
, node
) {
1972 rmcd_debug(EXIT
, "free req->filp=%p cookie=%d compl=%s",
1973 req
->filp
, req
->cookie
,
1974 completion_done(&req
->req_comp
)?"yes":"no");
1975 list_del(&req
->node
);
1976 kref_put(&req
->refcount
, dma_req_free
);
1980 put_dma_channel(priv
);
1981 wret
= wait_for_completion_interruptible_timeout(&priv
->comp
, tmo
);
1984 rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
1985 current
->comm
, task_pid_nr(current
), wret
);
1988 if (priv
->dmach
!= priv
->md
->dma_chan
) {
1989 rmcd_debug(EXIT
, "Release DMA channel for filp=%p %s(%d)",
1990 filp
, current
->comm
, task_pid_nr(current
));
1991 rio_release_dma(priv
->dmach
);
1993 rmcd_debug(EXIT
, "Adjust default DMA channel refcount");
1994 kref_put(&md
->dma_ref
, mport_release_def_dma
);
2000 #define mport_cdev_release_dma(priv) do {} while (0)
2004 * mport_cdev_release() - Release character device
2006 static int mport_cdev_release(struct inode
*inode
, struct file
*filp
)
2008 struct mport_cdev_priv
*priv
= filp
->private_data
;
2009 struct mport_dev
*chdev
;
2010 struct rio_mport_pw_filter
*pw_filter
, *pw_filter_next
;
2011 struct rio_mport_db_filter
*db_filter
, *db_filter_next
;
2012 struct rio_mport_mapping
*map
, *_map
;
2013 unsigned long flags
;
2015 rmcd_debug(EXIT
, "%s filp=%p", dev_name(&priv
->md
->dev
), filp
);
2018 mport_cdev_release_dma(filp
);
2020 priv
->event_mask
= 0;
2022 spin_lock_irqsave(&chdev
->pw_lock
, flags
);
2023 if (!list_empty(&priv
->pw_filters
)) {
2024 list_for_each_entry_safe(pw_filter
, pw_filter_next
,
2025 &priv
->pw_filters
, priv_node
)
2026 rio_mport_delete_pw_filter(pw_filter
);
2028 spin_unlock_irqrestore(&chdev
->pw_lock
, flags
);
2030 spin_lock_irqsave(&chdev
->db_lock
, flags
);
2031 list_for_each_entry_safe(db_filter
, db_filter_next
,
2032 &priv
->db_filters
, priv_node
) {
2033 rio_mport_delete_db_filter(db_filter
);
2035 spin_unlock_irqrestore(&chdev
->db_lock
, flags
);
2037 kfifo_free(&priv
->event_fifo
);
2039 mutex_lock(&chdev
->buf_mutex
);
2040 list_for_each_entry_safe(map
, _map
, &chdev
->mappings
, node
) {
2041 if (map
->filp
== filp
) {
2042 rmcd_debug(EXIT
, "release mapping %p filp=%p",
2043 map
->virt_addr
, filp
);
2044 kref_put(&map
->ref
, mport_release_mapping
);
2047 mutex_unlock(&chdev
->buf_mutex
);
2049 mport_cdev_fasync(-1, filp
, 0);
2050 filp
->private_data
= NULL
;
2051 mutex_lock(&chdev
->file_mutex
);
2052 list_del(&priv
->list
);
2053 mutex_unlock(&chdev
->file_mutex
);
2054 put_device(&chdev
->dev
);
2060 * mport_cdev_ioctl() - IOCTLs for character device
2062 static long mport_cdev_ioctl(struct file
*filp
,
2063 unsigned int cmd
, unsigned long arg
)
2066 struct mport_cdev_priv
*data
= filp
->private_data
;
2067 struct mport_dev
*md
= data
->md
;
2069 if (atomic_read(&md
->active
) == 0)
2073 case RIO_MPORT_MAINT_READ_LOCAL
:
2074 return rio_mport_maint_rd(data
, (void __user
*)arg
, 1);
2075 case RIO_MPORT_MAINT_WRITE_LOCAL
:
2076 return rio_mport_maint_wr(data
, (void __user
*)arg
, 1);
2077 case RIO_MPORT_MAINT_READ_REMOTE
:
2078 return rio_mport_maint_rd(data
, (void __user
*)arg
, 0);
2079 case RIO_MPORT_MAINT_WRITE_REMOTE
:
2080 return rio_mport_maint_wr(data
, (void __user
*)arg
, 0);
2081 case RIO_MPORT_MAINT_HDID_SET
:
2082 return maint_hdid_set(data
, (void __user
*)arg
);
2083 case RIO_MPORT_MAINT_COMPTAG_SET
:
2084 return maint_comptag_set(data
, (void __user
*)arg
);
2085 case RIO_MPORT_MAINT_PORT_IDX_GET
:
2086 return maint_port_idx_get(data
, (void __user
*)arg
);
2087 case RIO_MPORT_GET_PROPERTIES
:
2088 md
->properties
.hdid
= md
->mport
->host_deviceid
;
2089 if (copy_to_user((void __user
*)arg
, &(md
->properties
),
2090 sizeof(md
->properties
)))
2093 case RIO_ENABLE_DOORBELL_RANGE
:
2094 return rio_mport_add_db_filter(data
, (void __user
*)arg
);
2095 case RIO_DISABLE_DOORBELL_RANGE
:
2096 return rio_mport_remove_db_filter(data
, (void __user
*)arg
);
2097 case RIO_ENABLE_PORTWRITE_RANGE
:
2098 return rio_mport_add_pw_filter(data
, (void __user
*)arg
);
2099 case RIO_DISABLE_PORTWRITE_RANGE
:
2100 return rio_mport_remove_pw_filter(data
, (void __user
*)arg
);
2101 case RIO_SET_EVENT_MASK
:
2102 data
->event_mask
= (u32
)arg
;
2104 case RIO_GET_EVENT_MASK
:
2105 if (copy_to_user((void __user
*)arg
, &data
->event_mask
,
2109 case RIO_MAP_OUTBOUND
:
2110 return rio_mport_obw_map(filp
, (void __user
*)arg
);
2111 case RIO_MAP_INBOUND
:
2112 return rio_mport_map_inbound(filp
, (void __user
*)arg
);
2113 case RIO_UNMAP_OUTBOUND
:
2114 return rio_mport_obw_free(filp
, (void __user
*)arg
);
2115 case RIO_UNMAP_INBOUND
:
2116 return rio_mport_inbound_free(filp
, (void __user
*)arg
);
2118 return rio_mport_alloc_dma(filp
, (void __user
*)arg
);
2120 return rio_mport_free_dma(filp
, (void __user
*)arg
);
2121 case RIO_WAIT_FOR_ASYNC
:
2122 return rio_mport_wait_for_async_dma(filp
, (void __user
*)arg
);
2124 return rio_mport_transfer_ioctl(filp
, (void __user
*)arg
);
2126 return rio_mport_add_riodev(data
, (void __user
*)arg
);
2128 return rio_mport_del_riodev(data
, (void __user
*)arg
);
2137 * mport_release_mapping - free mapping resources and info structure
2138 * @ref: a pointer to the kref within struct rio_mport_mapping
2140 * NOTE: Shall be called while holding buf_mutex.
2142 static void mport_release_mapping(struct kref
*ref
)
2144 struct rio_mport_mapping
*map
=
2145 container_of(ref
, struct rio_mport_mapping
, ref
);
2146 struct rio_mport
*mport
= map
->md
->mport
;
2148 rmcd_debug(MMAP
, "type %d mapping @ %p (phys = %pad) for %s",
2149 map
->dir
, map
->virt_addr
,
2150 &map
->phys_addr
, mport
->name
);
2152 list_del(&map
->node
);
2156 rio_unmap_inb_region(mport
, map
->phys_addr
);
2159 dma_free_coherent(mport
->dev
.parent
, map
->size
,
2160 map
->virt_addr
, map
->phys_addr
);
2163 rio_unmap_outb_region(mport
, map
->rioid
, map
->rio_addr
);
2169 static void mport_mm_open(struct vm_area_struct
*vma
)
2171 struct rio_mport_mapping
*map
= vma
->vm_private_data
;
2173 rmcd_debug(MMAP
, "%pad", &map
->phys_addr
);
2174 kref_get(&map
->ref
);
2177 static void mport_mm_close(struct vm_area_struct
*vma
)
2179 struct rio_mport_mapping
*map
= vma
->vm_private_data
;
2181 rmcd_debug(MMAP
, "%pad", &map
->phys_addr
);
2182 mutex_lock(&map
->md
->buf_mutex
);
2183 kref_put(&map
->ref
, mport_release_mapping
);
2184 mutex_unlock(&map
->md
->buf_mutex
);
2187 static const struct vm_operations_struct vm_ops
= {
2188 .open
= mport_mm_open
,
2189 .close
= mport_mm_close
,
2192 static int mport_cdev_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
2194 struct mport_cdev_priv
*priv
= filp
->private_data
;
2195 struct mport_dev
*md
;
2196 size_t size
= vma
->vm_end
- vma
->vm_start
;
2198 unsigned long offset
;
2200 struct rio_mport_mapping
*map
;
2202 rmcd_debug(MMAP
, "0x%x bytes at offset 0x%lx",
2203 (unsigned int)size
, vma
->vm_pgoff
);
2206 baddr
= ((dma_addr_t
)vma
->vm_pgoff
<< PAGE_SHIFT
);
2208 mutex_lock(&md
->buf_mutex
);
2209 list_for_each_entry(map
, &md
->mappings
, node
) {
2210 if (baddr
>= map
->phys_addr
&&
2211 baddr
< (map
->phys_addr
+ map
->size
)) {
2216 mutex_unlock(&md
->buf_mutex
);
2221 offset
= baddr
- map
->phys_addr
;
2223 if (size
+ offset
> map
->size
)
2226 vma
->vm_pgoff
= offset
>> PAGE_SHIFT
;
2227 rmcd_debug(MMAP
, "MMAP adjusted offset = 0x%lx", vma
->vm_pgoff
);
2229 if (map
->dir
== MAP_INBOUND
|| map
->dir
== MAP_DMA
)
2230 ret
= dma_mmap_coherent(md
->mport
->dev
.parent
, vma
,
2231 map
->virt_addr
, map
->phys_addr
, map
->size
);
2232 else if (map
->dir
== MAP_OUTBOUND
) {
2233 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
2234 ret
= vm_iomap_memory(vma
, map
->phys_addr
, map
->size
);
2236 rmcd_error("Attempt to mmap unsupported mapping type");
2241 vma
->vm_private_data
= map
;
2242 vma
->vm_ops
= &vm_ops
;
2245 rmcd_error("MMAP exit with err=%d", ret
);
2251 static __poll_t
mport_cdev_poll(struct file
*filp
, poll_table
*wait
)
2253 struct mport_cdev_priv
*priv
= filp
->private_data
;
2255 poll_wait(filp
, &priv
->event_rx_wait
, wait
);
2256 if (kfifo_len(&priv
->event_fifo
))
2257 return EPOLLIN
| EPOLLRDNORM
;
2262 static ssize_t
mport_read(struct file
*filp
, char __user
*buf
, size_t count
,
2265 struct mport_cdev_priv
*priv
= filp
->private_data
;
2272 if (kfifo_is_empty(&priv
->event_fifo
) &&
2273 (filp
->f_flags
& O_NONBLOCK
))
2276 if (count
% sizeof(struct rio_event
))
2279 ret
= wait_event_interruptible(priv
->event_rx_wait
,
2280 kfifo_len(&priv
->event_fifo
) != 0);
2284 while (ret
< count
) {
2285 if (kfifo_to_user(&priv
->event_fifo
, buf
,
2286 sizeof(struct rio_event
), &copied
))
2295 static ssize_t
mport_write(struct file
*filp
, const char __user
*buf
,
2296 size_t count
, loff_t
*ppos
)
2298 struct mport_cdev_priv
*priv
= filp
->private_data
;
2299 struct rio_mport
*mport
= priv
->md
->mport
;
2300 struct rio_event event
;
2306 if (count
% sizeof(event
))
2310 while ((count
- len
) >= (int)sizeof(event
)) {
2311 if (copy_from_user(&event
, buf
, sizeof(event
)))
2314 if (event
.header
!= RIO_DOORBELL
)
2317 ret
= rio_mport_send_doorbell(mport
,
2318 event
.u
.doorbell
.rioid
,
2319 event
.u
.doorbell
.payload
);
2323 len
+= sizeof(event
);
2324 buf
+= sizeof(event
);
2330 static const struct file_operations mport_fops
= {
2331 .owner
= THIS_MODULE
,
2332 .open
= mport_cdev_open
,
2333 .release
= mport_cdev_release
,
2334 .poll
= mport_cdev_poll
,
2336 .write
= mport_write
,
2337 .mmap
= mport_cdev_mmap
,
2338 .fasync
= mport_cdev_fasync
,
2339 .unlocked_ioctl
= mport_cdev_ioctl
2343 * Character device management
2346 static void mport_device_release(struct device
*dev
)
2348 struct mport_dev
*md
;
2350 rmcd_debug(EXIT
, "%s", dev_name(dev
));
2351 md
= container_of(dev
, struct mport_dev
, dev
);
2356 * mport_cdev_add() - Create mport_dev from rio_mport
2357 * @mport: RapidIO master port
2359 static struct mport_dev
*mport_cdev_add(struct rio_mport
*mport
)
2362 struct mport_dev
*md
;
2363 struct rio_mport_attr attr
;
2365 md
= kzalloc(sizeof(*md
), GFP_KERNEL
);
2367 rmcd_error("Unable allocate a device object");
2372 mutex_init(&md
->buf_mutex
);
2373 mutex_init(&md
->file_mutex
);
2374 INIT_LIST_HEAD(&md
->file_list
);
2376 device_initialize(&md
->dev
);
2377 md
->dev
.devt
= MKDEV(MAJOR(dev_number
), mport
->id
);
2378 md
->dev
.class = dev_class
;
2379 md
->dev
.parent
= &mport
->dev
;
2380 md
->dev
.release
= mport_device_release
;
2381 dev_set_name(&md
->dev
, DEV_NAME
"%d", mport
->id
);
2382 atomic_set(&md
->active
, 1);
2384 cdev_init(&md
->cdev
, &mport_fops
);
2385 md
->cdev
.owner
= THIS_MODULE
;
2387 ret
= cdev_device_add(&md
->cdev
, &md
->dev
);
2389 rmcd_error("Failed to register mport %d (err=%d)",
2394 INIT_LIST_HEAD(&md
->doorbells
);
2395 spin_lock_init(&md
->db_lock
);
2396 INIT_LIST_HEAD(&md
->portwrites
);
2397 spin_lock_init(&md
->pw_lock
);
2398 INIT_LIST_HEAD(&md
->mappings
);
2400 md
->properties
.id
= mport
->id
;
2401 md
->properties
.sys_size
= mport
->sys_size
;
2402 md
->properties
.hdid
= mport
->host_deviceid
;
2403 md
->properties
.index
= mport
->index
;
2405 /* The transfer_mode property will be returned through mport query
2408 #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2409 md
->properties
.transfer_mode
|= RIO_TRANSFER_MODE_MAPPED
;
2411 md
->properties
.transfer_mode
|= RIO_TRANSFER_MODE_TRANSFER
;
2413 ret
= rio_query_mport(mport
, &attr
);
2415 md
->properties
.flags
= attr
.flags
;
2416 md
->properties
.link_speed
= attr
.link_speed
;
2417 md
->properties
.link_width
= attr
.link_width
;
2418 md
->properties
.dma_max_sge
= attr
.dma_max_sge
;
2419 md
->properties
.dma_max_size
= attr
.dma_max_size
;
2420 md
->properties
.dma_align
= attr
.dma_align
;
2421 md
->properties
.cap_sys_size
= 0;
2422 md
->properties
.cap_transfer_mode
= 0;
2423 md
->properties
.cap_addr_size
= 0;
2425 pr_info(DRV_PREFIX
"Failed to obtain info for %s cdev(%d:%d)\n",
2426 mport
->name
, MAJOR(dev_number
), mport
->id
);
2428 mutex_lock(&mport_devs_lock
);
2429 list_add_tail(&md
->node
, &mport_devs
);
2430 mutex_unlock(&mport_devs_lock
);
2432 pr_info(DRV_PREFIX
"Added %s cdev(%d:%d)\n",
2433 mport
->name
, MAJOR(dev_number
), mport
->id
);
2438 put_device(&md
->dev
);
2443 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
2444 * associated DMA channels.
/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 * associated DMA channels.
 * @md: mport character device being shut down
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *client;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->dmach) {
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
		}
	}
	mutex_unlock(&md->file_mutex);

	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}
2472 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
2475 static int mport_cdev_kill_fasync(struct mport_dev
*md
)
2477 unsigned int files
= 0;
2478 struct mport_cdev_priv
*client
;
2480 mutex_lock(&md
->file_mutex
);
2481 list_for_each_entry(client
, &md
->file_list
, list
) {
2482 if (client
->async_queue
)
2483 kill_fasync(&client
->async_queue
, SIGIO
, POLL_HUP
);
2486 mutex_unlock(&md
->file_mutex
);
2491 * mport_cdev_remove() - Remove mport character device
2492 * @dev: Mport device to remove
2494 static void mport_cdev_remove(struct mport_dev
*md
)
2496 struct rio_mport_mapping
*map
, *_map
;
2498 rmcd_debug(EXIT
, "Remove %s cdev", md
->mport
->name
);
2499 atomic_set(&md
->active
, 0);
2500 mport_cdev_terminate_dma(md
);
2501 rio_del_mport_pw_handler(md
->mport
, md
, rio_mport_pw_handler
);
2502 cdev_device_del(&md
->cdev
, &md
->dev
);
2503 mport_cdev_kill_fasync(md
);
2505 /* TODO: do we need to give clients some time to close file
2506 * descriptors? Simple wait for XX, or kref?
2510 * Release DMA buffers allocated for the mport device.
2511 * Disable associated inbound Rapidio requests mapping if applicable.
2513 mutex_lock(&md
->buf_mutex
);
2514 list_for_each_entry_safe(map
, _map
, &md
->mappings
, node
) {
2515 kref_put(&map
->ref
, mport_release_mapping
);
2517 mutex_unlock(&md
->buf_mutex
);
2519 if (!list_empty(&md
->mappings
))
2520 rmcd_warn("WARNING: %s pending mappings on removal",
2523 rio_release_inb_dbell(md
->mport
, 0, 0x0fff);
2525 put_device(&md
->dev
);
2529 * RIO rio_mport_interface driver
2533 * mport_add_mport() - Add rio_mport from LDM device struct
2534 * @dev: Linux device model struct
2535 * @class_intf: Linux class_interface
2537 static int mport_add_mport(struct device
*dev
,
2538 struct class_interface
*class_intf
)
2540 struct rio_mport
*mport
= NULL
;
2541 struct mport_dev
*chdev
= NULL
;
2543 mport
= to_rio_mport(dev
);
2547 chdev
= mport_cdev_add(mport
);
2555 * mport_remove_mport() - Remove rio_mport from global list
2556 * TODO remove device from global mport_dev list
2558 static void mport_remove_mport(struct device
*dev
,
2559 struct class_interface
*class_intf
)
2561 struct rio_mport
*mport
= NULL
;
2562 struct mport_dev
*chdev
;
2565 mport
= to_rio_mport(dev
);
2566 rmcd_debug(EXIT
, "Remove %s", mport
->name
);
2568 mutex_lock(&mport_devs_lock
);
2569 list_for_each_entry(chdev
, &mport_devs
, node
) {
2570 if (chdev
->mport
->id
== mport
->id
) {
2571 atomic_set(&chdev
->active
, 0);
2572 list_del(&chdev
->node
);
2577 mutex_unlock(&mport_devs_lock
);
2580 mport_cdev_remove(chdev
);
2583 /* the rio_mport_interface is used to handle local mport devices */
2584 static struct class_interface rio_mport_interface __refdata
= {
2585 .class = &rio_mport_class
,
2586 .add_dev
= mport_add_mport
,
2587 .remove_dev
= mport_remove_mport
,
2591 * Linux kernel module
2595 * mport_init - Driver module loading
2597 static int __init
mport_init(void)
2601 /* Create device class needed by udev */
2602 dev_class
= class_create(THIS_MODULE
, DRV_NAME
);
2603 if (IS_ERR(dev_class
)) {
2604 rmcd_error("Unable to create " DRV_NAME
" class");
2605 return PTR_ERR(dev_class
);
2608 ret
= alloc_chrdev_region(&dev_number
, 0, RIO_MAX_MPORTS
, DRV_NAME
);
2612 rmcd_debug(INIT
, "Registered class with major=%d", MAJOR(dev_number
));
2614 /* Register to rio_mport_interface */
2615 ret
= class_interface_register(&rio_mport_interface
);
2617 rmcd_error("class_interface_register() failed, err=%d", ret
);
2624 unregister_chrdev_region(dev_number
, RIO_MAX_MPORTS
);
2626 class_destroy(dev_class
);
2631 * mport_exit - Driver module unloading
2633 static void __exit
mport_exit(void)
2635 class_interface_unregister(&rio_mport_interface
);
2636 class_destroy(dev_class
);
2637 unregister_chrdev_region(dev_number
, RIO_MAX_MPORTS
);
2640 module_init(mport_init
);
2641 module_exit(mport_exit
);