/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_V2_H
#define IOATDMA_V2_H

#include <linux/dmaengine.h>
#include <linux/circ_buf.h>
#include "dma.h"
#include "hw.h"

extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
/*
 * Missing in the original header: ioat_get_max_alloc_order() below expands to
 * this symbol, so it must be declared here for users of the macro to compile.
 * The definition lives in the v2 implementation file alongside
 * ioat_ring_alloc_order.
 */
extern int ioat_ring_max_alloc_order;

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

/* hard cap on ring size: at most 1 << 16 descriptors */
#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
5cbafa65 DW |
44 | |
45 | /* struct ioat2_dma_chan - ioat v2 / v3 channel attributes | |
46 | * @base: common ioat channel parameters | |
47 | * @xfercap_log; log2 of channel max transfer length (for fast division) | |
48 | * @head: allocated index | |
49 | * @issued: hardware notification point | |
50 | * @tail: cleanup index | |
5cbafa65 DW |
51 | * @dmacount: identical to 'head' except for occasionally resetting to zero |
52 | * @alloc_order: log2 of the number of allocated descriptors | |
074cc476 | 53 | * @produce: number of descriptors to produce at submit time |
5cbafa65 | 54 | * @ring: software ring buffer implementation of hardware ring |
074cc476 | 55 | * @prep_lock: serializes descriptor preparation (producers) |
5cbafa65 DW |
56 | */ |
57 | struct ioat2_dma_chan { | |
58 | struct ioat_chan_common base; | |
59 | size_t xfercap_log; | |
60 | u16 head; | |
61 | u16 issued; | |
62 | u16 tail; | |
63 | u16 dmacount; | |
64 | u16 alloc_order; | |
074cc476 | 65 | u16 produce; |
5cbafa65 | 66 | struct ioat_ring_ent **ring; |
074cc476 | 67 | spinlock_t prep_lock; |
5cbafa65 DW |
68 | }; |
69 | ||
70 | static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) | |
71 | { | |
72 | struct ioat_chan_common *chan = to_chan_common(c); | |
73 | ||
74 | return container_of(chan, struct ioat2_dma_chan, base); | |
75 | } | |
76 | ||
21b764e0 | 77 | static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat) |
5cbafa65 | 78 | { |
abb12dfd | 79 | return 1 << ioat->alloc_order; |
5cbafa65 DW |
80 | } |
81 | ||
/* count of descriptors in flight with the engine, i.e. allocated (head)
 * but not yet cleaned up (tail); CIRC_CNT handles index wrap-around
 */
static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
{
	return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
}
87 | ||
/* count of descriptors pending submission to hardware, i.e. allocated (head)
 * but not yet posted at the hardware notification point (issued)
 */
static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
{
	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
}
93 | ||
21b764e0 | 94 | static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat) |
5cbafa65 | 95 | { |
abb12dfd | 96 | return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); |
5cbafa65 DW |
97 | } |
98 | ||
5cbafa65 DW |
99 | static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) |
100 | { | |
101 | u16 num_descs = len >> ioat->xfercap_log; | |
102 | ||
103 | num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); | |
104 | return num_descs; | |
105 | } | |
106 | ||
/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @fill: hardware fill descriptor
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 */

struct ioat_ring_ent {
	/* all views alias the same hardware descriptor memory; the union
	 * member used depends on the operation type programmed into it
	 */
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_fill_descriptor *fill;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
};
141 | ||
142 | static inline struct ioat_ring_ent * | |
143 | ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) | |
144 | { | |
abb12dfd | 145 | return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)]; |
5cbafa65 DW |
146 | } |
147 | ||
09c8a5b8 DW |
148 | static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) |
149 | { | |
150 | struct ioat_chan_common *chan = &ioat->base; | |
151 | ||
152 | writel(addr & 0x00000000FFFFFFFF, | |
153 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | |
154 | writel(addr >> 32, | |
155 | chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | |
156 | } | |
157 | ||
/* probe/init entry points shared by the v2 and v3 implementations */
int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
/* descriptor ring management */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
int ioat2_enumerate_channels(struct ioatdma_device *device);
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags);
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
/* deferred-work callbacks (tasklet / timer) */
void ioat2_cleanup_event(unsigned long data);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
#endif /* IOATDMA_V2_H */