/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>
#include <asm/local.h>	/* local_t, used by struct dma_chan_percpu */

/**
 * enum dma_state - resource PNP/power management state
 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
 * @DMA_RESOURCE_RESUME: DMA device returning to full power
 * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
 */
enum dma_state {
	DMA_RESOURCE_SUSPEND,
	DMA_RESOURCE_RESUME,
	DMA_RESOURCE_AVAILABLE,
	DMA_RESOURCE_REMOVED,
};

/**
 * enum dma_state_client - state of the channel in the client
 * @DMA_ACK: client would like to use, or was using this channel
 * @DMA_DUP: client has already seen this channel, or is not using this channel
 * @DMA_NAK: client does not want to see any more channels
 */
enum dma_state_client {
	DMA_ACK,
	DMA_DUP,
	DMA_NAK,
};

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, < 0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

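/*
 * Example (illustrative sketch, not part of the API): checking a cookie
 * returned by one of the dma_async_memcpy_* helpers declared later in this
 * file. The channel "chan" is assumed to have been obtained through the
 * client's event callback.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (dma_submit_error(cookie))
 *		memcpy(dest, src, len);	(submission failed, fall back to CPU)
 */
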
/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)

/**
 * enum dma_ctrl_flags - DMA flags to augment operation preparation,
 *	control completion, and communicate status.
 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
 *	this transaction
 * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
 *	acknowledges receipt, i.e. has had a chance to establish any
 *	dependency chains
 */
enum dma_ctrl_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
	DMA_CTRL_ACK = (1 << 1),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @refcount: local_t used for open-coded "bigref" counting
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	local_t refcount;
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @refcount: kref, used in "bigref" slow-mode
 * @slow_ref: indicates that the DMA channel is free
 * @rcu: the DMA channel's RCU head
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct device dev;

	struct kref refcount;
	int slow_ref;
	struct rcu_head rcu;

	struct list_head device_node;
	struct dma_chan_percpu *local;
};

#define to_dma_chan(p) container_of(p, struct dma_chan, dev)

void dma_chan_cleanup(struct kref *kref);

/* take a channel reference: per-cpu fast path, kref slow path during teardown */
static inline void dma_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);
	else {
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

/* drop a channel reference taken with dma_chan_get() */
static inline void dma_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

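/*
 * Example (illustrative sketch): the core takes a reference when a client
 * acks a channel; code that borrows a channel outside that lifetime must
 * balance get/put itself. "do_offload_copy" is a hypothetical helper, not
 * part of this API.
 *
 *	dma_chan_get(chan);
 *	cookie = do_offload_copy(chan, dest, src, len);
 *	dma_chan_put(chan);
 */
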
/**
 * typedef dma_event_callback - function pointer to a DMA event callback
 * For each channel added to the system this routine is called for each client.
 * If the client would like to use the channel it returns DMA_ACK to signal
 * the dmaengine core to take out a reference on the channel and its
 * corresponding device. A client must not 'ack' an available channel more
 * than once. When a channel is removed all clients are notified. If a client
 * is using the channel it must 'ack' the removal. A client must not 'ack' a
 * removed channel more than once.
 * @client - 'this' pointer for the client context
 * @chan - channel to be acted upon
 * @state - available or removed
 */
struct dma_client;
typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
		struct dma_chan *chan, enum dma_state state);

/**
 * struct dma_client - info on the entity making use of DMA services
 * @event_callback: func ptr to call when something happens
 * @cap_mask: only return channels that satisfy the requested capabilities;
 *	a value of zero corresponds to any capability
 * @global_node: list_head for global dma_client_list
 */
struct dma_client {
	dma_event_callback event_callback;
	dma_cap_mask_t cap_mask;
	struct list_head global_node;
};

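/*
 * Example (illustrative sketch): a minimal client following the callback
 * rules documented above. It acks the first available channel, remembers
 * it, and acks its removal. "my_event", "my_chan" and "my_client" are
 * hypothetical names, for illustration only.
 *
 *	static struct dma_chan *my_chan;
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		switch (state) {
 *		case DMA_RESOURCE_AVAILABLE:
 *			if (my_chan)
 *				return DMA_DUP;	(already have one)
 *			my_chan = chan;
 *			return DMA_ACK;
 *		case DMA_RESOURCE_REMOVED:
 *			if (chan != my_chan)
 *				return DMA_DUP;
 *			my_chan = NULL;
 *			return DMA_ACK;	(we were using it)
 *		default:
 *			return DMA_DUP;
 *		}
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback = my_event,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */
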
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @flags: flags to augment operation preparation, control completion, and
 *	communicate status
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @next: at completion submit this descriptor
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the parent and next pointers
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct dma_async_tx_descriptor *next;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

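/*
 * Example (illustrative sketch): the raw prep/submit flow a client drives
 * through this descriptor. Error handling is elided; "done_cb" and "ctx"
 * are hypothetical.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
 *			len, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;	(descriptor pool exhausted)
 *	tx->callback = done_cb;
 *	tx->callback_param = ctx;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */
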
/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @refcount: reference count
 * @done: IO completion struct
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {
	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	struct kref refcount;
	struct completion done;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan, unsigned long flags);

	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

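/*
 * Example (illustrative sketch): the skeleton a driver fills in before
 * registering with the core. All "my_*" names are hypothetical; a real
 * driver also allocates its struct dma_chan instances and links them onto
 * ->channels via device_node before registering.
 *
 *	static struct dma_device my_dma = {
 *		.chancnt			= 1,
 *		.device_alloc_chan_resources	= my_alloc_chan_resources,
 *		.device_free_chan_resources	= my_free_chan_resources,
 *		.device_prep_dma_memcpy		= my_prep_memcpy,
 *		.device_is_tx_complete		= my_is_tx_complete,
 *		.device_issue_pending		= my_issue_pending,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, my_dma.cap_mask);
 *	INIT_LIST_HEAD(&my_dma.channels);
 *	(... create channels, list_add_tail(&chan->device_node,
 *	 &my_dma.channels) ...)
 *	err = dma_async_device_register(&my_dma);
 */
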
/* --- public DMA engine API --- */

void dma_async_client_register(struct dma_client *client);
void dma_async_client_unregister(struct dma_client *client);
void dma_async_client_chan_request(struct dma_client *client);
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void
async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline int
async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return tx->flags & DMA_CTRL_ACK;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask); \
		(cap) < DMA_TX_TYPE_END; \
		(cap) = next_dma_cap((cap), (mask)))

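/*
 * Example (illustrative sketch): building a capability mask and walking it.
 * Note this version of the API has no dma_cap_zero() helper, so the mask is
 * zero-initialized directly.
 *
 *	dma_cap_mask_t mask = { };
 *	enum dma_transaction_type cap;
 *
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	if (dma_has_cap(DMA_MEMCPY, mask))
 *		...
 *	for_each_dma_cap_mask(cap, mask)
 *		printk("cap %d set\n", cap);
 */
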
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

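/*
 * Example (illustrative sketch): polling several cookies with one hardware
 * query, per the dma_async_is_tx_complete() documentation above. The first
 * call fills @last/@used from driver state; further cookies are then tested
 * purely in software.
 *
 *	dma_cookie_t last, used;
 *	enum dma_status status;
 *
 *	dma_async_issue_pending(chan);
 *	status = dma_async_is_tx_complete(chan, cookie1, &last, &used);
 *	if (status == DMA_ERROR)
 *		...
 *	if (dma_async_is_complete(cookie2, last, used) == DMA_SUCCESS)
 *		...	(cookie2 is done too, no second HW query needed)
 */
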
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char __user *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

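/*
 * Example (illustrative sketch): pinning a user iovec once, then offloading
 * copies into it. Error handling and completion waiting are elided.
 *
 *	struct dma_pinned_list *pinned;
 *	dma_cookie_t cookie;
 *
 *	pinned = dma_pin_iovec_pages(iov, total_len);
 *	if (pinned) {
 *		cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
 *		...	(issue_pending, wait for completion)
 *		dma_unpin_iovec_pages(pinned);
 *	}
 */
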
#endif /* DMAENGINE_H */