drivers/dma/mv_xor.h
/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define USE_TIMER
#define MV_XOR_POOL_SIZE	PAGE_SIZE
#define MV_XOR_SLOT_SIZE	64
#define MV_XOR_THRESHOLD	1
#define MV_XOR_MAX_CHANNELS	2

/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_DESCRIPTOR_SWAP		BIT(14)

#define XOR_CURR_DESC(chan)	(chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)
#define XOR_INTR_MASK_VALUE	0x3F5

#define WINDOW_BASE(w)		(0x50 + ((w) << 2))
#define WINDOW_SIZE(w)		(0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)	(0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)	(0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))

struct mv_xor_device {
	void __iomem *xor_base;
	void __iomem *xor_high_base;
	struct clk *clk;
	struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptor slot pool
 * @mmr_base: memory-mapped base of the channel's main registers
 * @mmr_high_base: memory-mapped base of the channel's high registers
 * @idx: the index of the XOR channel
 * @irq: interrupt line used by the channel
 * @current_type: DMA transaction type currently programmed on the channel
 * @chain: device chain view of the descriptors
 * @completed_slots: slots completed by HW but not yet acked
 * @dma_desc_pool: DMA (bus) address of the hardware descriptor pool
 * @dma_desc_pool_virt: virtual address of the hardware descriptor pool
 * @pool_size: size of the hardware descriptor pool
 * @dmadev: embedded dmaengine device for this channel
 * @dmachan: common dmaengine channel object members
 * @last_used: place holder for allocation to continue from where it left off
 * @all_slots: complete domain of slots usable by the channel
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 */
struct mv_xor_chan {
	int pending;
	spinlock_t lock; /* protects the descriptor slot pool */
	void __iomem *mmr_base;
	void __iomem *mmr_high_base;
	unsigned int idx;
	int irq;
	enum dma_transaction_type current_type;
	struct list_head chain;
	struct list_head completed_slots;
	dma_addr_t dma_desc_pool;
	void *dma_desc_pool_virt;
	size_t pool_size;
	struct dma_device dmadev;
	struct dma_chan dmachan;
	struct mv_xor_desc_slot *last_used;
	struct list_head all_slots;
	int slots_allocated;
	struct tasklet_struct irq_tasklet;
#ifdef USE_TIMER
	unsigned long cleanup_time;
	u32 current_on_last_cleanup;
#endif
};

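/*
 * Illustrative sketch (not part of the original header): how a driver
 * might use the per-channel register macros above together with the
 * readl()/writel() accessors from <linux/io.h>.  The helper names below
 * are hypothetical and the register bit layout is assumed, not taken
 * from this header; the real mv_xor driver may differ.
 */
static inline void mv_xor_set_mode_example(struct mv_xor_chan *chan,
					   u32 op_mode)
{
	u32 config = readl(XOR_CONFIG(chan));

	/* Assumed: the low bits of XOR_CONFIG select the operation mode,
	 * e.g. XOR_OPERATION_MODE_XOR or XOR_OPERATION_MODE_MEMCPY. */
	config &= ~0x7;
	config |= op_mode;
	writel(config, XOR_CONFIG(chan));
}

static inline void mv_xor_activate_example(struct mv_xor_chan *chan)
{
	u32 activation;

	/* Assumed: setting bit 0 of the activation register starts the
	 * channel on the currently programmed descriptor chain. */
	activation = readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	writel(activation, XOR_ACTIVATION(chan));
}
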
/**
 * struct mv_xor_desc_slot - software descriptor
 * @slot_node: node on the mv_xor_chan.all_slots list
 * @chain_node: node on the mv_xor_chan.chain list
 * @completed_node: node on the mv_xor_chan.completed_slots list
 * @type: DMA transaction type of the operation held in this slot
 * @hw_desc: virtual address of the hardware descriptor chain
 * @group_head: first operation in a transaction
 * @slot_cnt: total slots used in a transaction (group of operations)
 * @slots_per_op: number of slots per operation
 * @idx: pool index
 * @unmap_src_cnt: number of xor sources
 * @unmap_len: transaction byte count
 * @tx_list: list of slots that make up a multi-descriptor transaction
 * @async_tx: support for the async_tx api
 * @xor_check_result: result of zero-sum check
 * @crc32_result: result of CRC-32 calculation
 */
struct mv_xor_desc_slot {
	struct list_head slot_node;
	struct list_head chain_node;
	struct list_head completed_node;
	enum dma_transaction_type type;
	void *hw_desc;
	struct mv_xor_desc_slot *group_head;
	u16 slot_cnt;
	u16 slots_per_op;
	u16 idx;
	u16 unmap_src_cnt;
	u32 value;
	size_t unmap_len;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
	union {
		u32 *xor_check_result;
		u32 *crc32_result;
	};
#ifdef USE_TIMER
	unsigned long arrival_time;
	struct timer_list timeout;
#endif
};

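/*
 * Illustrative sketch (not part of the original header): the async_tx
 * framework hands callbacks a pointer to the embedded
 * dma_async_tx_descriptor, so the software descriptor is recovered with
 * container_of().  The helper name is hypothetical.
 */
static inline struct mv_xor_desc_slot *
txd_to_mv_xor_slot_example(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct mv_xor_desc_slot, async_tx);
}
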
/*
 * This structure describes the hardware XOR descriptor, which is 64
 * bytes in size.  The mv_phy_src_idx() macro must be used when indexing
 * the values of the phy_src_addr[] array.  This is because the
 * 'descriptor swap' feature, used on big endian systems, swaps
 * descriptor data within blocks of 8 bytes, so two consecutive entries
 * of phy_src_addr[] are effectively swapped on big endian, which
 * explains the different mv_phy_src_idx() implementation.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 status;		/* descriptor execution status */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_dest_addr;	/* destination block address */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved1;
	u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif

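/*
 * Illustrative sketch (not part of the original header): every access to
 * phy_src_addr[] should go through mv_phy_src_idx() so that the same
 * source slot is addressed regardless of the 8-byte descriptor swap
 * performed on big endian systems.  The helper name is hypothetical.
 */
static inline void mv_desc_set_src_addr_example(struct mv_xor_desc *hw_desc,
						int index, u32 addr)
{
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
}
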
#define to_mv_sw_desc(addr_hw_desc)	\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#define MV_XOR_MIN_BYTE_COUNT	(128)
#define XOR_MAX_BYTE_COUNT	((16 * 1024 * 1024) - 1)
#define MV_XOR_MAX_BYTE_COUNT	XOR_MAX_BYTE_COUNT

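/*
 * Illustrative sketch (not part of the original header): a transfer can
 * only be programmed on the engine if its length lies within the byte
 * count limits above; requests outside that range would have to be
 * rejected or handled differently by the driver.  Hypothetical helper.
 */
static inline bool mv_xor_len_in_range_example(size_t len)
{
	return len >= MV_XOR_MIN_BYTE_COUNT && len <= MV_XOR_MAX_BYTE_COUNT;
}
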
#endif