drivers/dma/dmaengine.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers, and are not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
        chan->cookie = DMA_MIN_COOKIE;
        chan->completed_cookie = DMA_MIN_COOKIE;
}

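/*
 * Usage sketch (illustrative only, not compiled): a driver would typically
 * reset the cookie counters with dma_cookie_init() from its
 * ->device_alloc_chan_resources() hook, before handing the channel out.
 * "foo_dma_chan" and foo_alloc_descriptors() below are hypothetical names,
 * not part of this API.
 */
#if 0
static int foo_alloc_chan_resources(struct dma_chan *chan)
{
        struct foo_dma_chan *fc = container_of(chan, struct foo_dma_chan, chan);

        /* Reset cookie bookkeeping before the channel is used again. */
        dma_cookie_init(&fc->chan);

        return foo_alloc_descriptors(fc);       /* hypothetical helper */
}
#endif
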
/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *chan = tx->chan;
        dma_cookie_t cookie;

        cookie = chan->cookie + 1;
        if (cookie < DMA_MIN_COOKIE)
                cookie = DMA_MIN_COOKIE;
        tx->cookie = chan->cookie = cookie;

        return cookie;
}

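/*
 * Usage sketch (illustrative only, not compiled): dma_cookie_assign() is
 * normally called from a driver's ->tx_submit() implementation while the
 * channel lock is held, so cookies stay monotonic per channel. The "foo_*"
 * names below are hypothetical.
 */
#if 0
static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct foo_dma_chan *fc = container_of(tx->chan, struct foo_dma_chan, chan);
        struct foo_desc *desc = container_of(tx, struct foo_desc, txd);
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&fc->lock, flags);
        cookie = dma_cookie_assign(tx);         /* serialized by fc->lock */
        list_add_tail(&desc->node, &fc->pending);
        spin_unlock_irqrestore(&fc->lock, flags);

        return cookie;
}
#endif
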
/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channel's completed
 * cookie marker. Zero the descriptor's cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
        BUG_ON(tx->cookie < DMA_MIN_COOKIE);
        tx->chan->completed_cookie = tx->cookie;
        tx->cookie = 0;
}

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL. No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        dma_cookie_t used, complete;

        used = chan->cookie;
        complete = chan->completed_cookie;
        barrier();
        if (state) {
                state->last = complete;
                state->used = used;
                state->residue = 0;
        }
        return dma_async_is_complete(cookie, complete, used);
}

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
        if (state)
                state->residue = residue;
}

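/*
 * Usage sketch (illustrative only, not compiled): a typical
 * ->device_tx_status() implementation first asks dma_cookie_status() whether
 * the cookie has already completed, and only computes a residue for
 * in-flight transfers via dma_set_residue(). "foo_dma_chan" and
 * foo_residue() are hypothetical.
 */
#if 0
static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct foo_dma_chan *fc = container_of(chan, struct foo_dma_chan, chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        /* Still in flight: report remaining bytes if state was requested. */
        dma_set_residue(txstate, foo_residue(fc, cookie));

        return ret;
}
#endif
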
struct dmaengine_desc_callback {
        dma_async_tx_callback callback;
        dma_async_tx_callback_result callback_result;
        void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed-in cb struct with what's available in the passed-in
 * tx descriptor struct. No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
                            struct dmaengine_desc_callback *cb)
{
        cb->callback = tx->callback;
        cb->callback_result = tx->callback_result;
        cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
                               const struct dmaengine_result *result)
{
        struct dmaengine_result dummy_result = {
                .result = DMA_TRANS_NOERROR,
                .residue = 0
        };

        if (cb->callback_result) {
                if (!result)
                        result = &dummy_result;
                cb->callback_result(cb->callback_param, result);
        } else if (cb->callback) {
                cb->callback(cb->callback_param);
        }
}

/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 *                                      then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
                                   const struct dmaengine_result *result)
{
        struct dmaengine_desc_callback cb;

        dmaengine_desc_get_callback(tx, &cb);
        dmaengine_desc_callback_invoke(&cb, result);
}

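/*
 * Usage sketch (illustrative only, not compiled): a completion tasklet or IRQ
 * thread usually marks the cookie complete and snapshots the callback with
 * dmaengine_desc_get_callback() under the channel lock, then drops the lock
 * before invoking the callback so the callback may safely resubmit work.
 * The "foo_*" names are hypothetical.
 */
#if 0
static void foo_complete_desc(struct foo_dma_chan *fc, struct foo_desc *desc,
                              const struct dmaengine_result *result)
{
        struct dmaengine_desc_callback cb;
        unsigned long flags;

        spin_lock_irqsave(&fc->lock, flags);
        dma_cookie_complete(&desc->txd);        /* updates chan->completed_cookie */
        dmaengine_desc_get_callback(&desc->txd, &cb);
        list_del(&desc->node);
        spin_unlock_irqrestore(&fc->lock, flags);

        /* Invoke outside the lock; handles both result and legacy callbacks. */
        dmaengine_desc_callback_invoke(&cb, result);
}
#endif
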
/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return a bool indicating whether the callback in cb is valid.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
        return (cb->callback) ? true : false;
}

#endif