/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        struct ioat_chan_common *chan;
        unsigned long attnstatus;
        int bit;
        u8 intrctrl;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
                return IRQ_NONE;
        }

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
                chan = ioat_chan_by_index(instance, bit);
                tasklet_schedule(&chan->cleanup_task);
        }

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
        return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
        struct ioat_chan_common *chan = data;

        tasklet_schedule(&chan->cleanup_task);

        return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
        struct dma_device *dma = &device->common;
        struct dma_chan *c = &chan->common;
        unsigned long data = (unsigned long) c;

        chan->device = device;
        chan->reg_base = device->reg_base + (0x80 * (idx + 1));
        spin_lock_init(&chan->cleanup_lock);
        chan->common.device = dma;
        list_add_tail(&chan->common.device_node, &dma->channels);
        device->idx[idx] = chan;
        init_timer(&chan->timer);
        chan->timer.function = device->timer_fn;
        chan->timer.data = data;
        tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
        tasklet_disable(&chan->cleanup_task);
}

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
        u8 xfercap_scale;
        u32 xfercap;
        int i;
        struct ioat_dma_chan *ioat;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        dma->chancnt &= 0x1f; /* bits [4:0] valid */
        if (dma->chancnt > ARRAY_SIZE(device->idx)) {
                dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
                         dma->chancnt, ARRAY_SIZE(device->idx));
                dma->chancnt = ARRAY_SIZE(device->idx);
        }
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap_scale &= 0x1f; /* bits [4:0] valid */
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
        if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
                dma->chancnt--;
#endif
        for (i = 0; i < dma->chancnt; i++) {
                ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
                if (!ioat)
                        break;

                ioat_init_channel(device, &ioat->base, i);
                ioat->xfercap = xfercap;
                spin_lock_init(&ioat->desc_lock);
                INIT_LIST_HEAD(&ioat->free_desc);
                INIT_LIST_HEAD(&ioat->used_desc);
        }
        dma->chancnt = i;
        return i;
}

/**
 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                  descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
        void __iomem *reg_base = ioat->base.reg_base;

        dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
                __func__, ioat->pending);
        ioat->pending = 0;
        writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(chan);

        if (ioat->pending > 0) {
                spin_lock_bh(&ioat->desc_lock);
                __ioat1_dma_memcpy_issue_pending(ioat);
                spin_unlock_bh(&ioat->desc_lock);
        }
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        void __iomem *reg_base = chan->reg_base;
        u32 chansts, chanerr;

        dev_warn(to_dev(chan), "reset\n");
        chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
        chansts = *chan->completion & IOAT_CHANSTS_STATUS;
        if (chanerr) {
                dev_err(to_dev(chan),
                        "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
                        chan_num(chan), chansts, chanerr);
                writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
        }

        /*
         * whack it upside the head with a reset
         * and wait for things to settle out.
         * force the pending count to a really big negative
         * to make sure no one forces an issue_pending
         * while we're waiting.
         */

        ioat->pending = INT_MIN;
        writeb(IOAT_CHANCMD_RESET,
               reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
        set_bit(IOAT_RESET_PENDING, &chan->state);
        mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

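/**
 * ioat1_tx_submit - attach a descriptor chain to the hardware (->tx_submit hook)
 * @tx: descriptor prepared by ioat1_dma_prep_memcpy
 *
 * Assigns the next cookie, splices the software chain onto used_desc,
 * links the first new hardware descriptor after the current chain tail,
 * and kicks the APPEND command once ioat->pending reaches
 * ioat_pending_level.
 */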
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *first;
        struct ioat_desc_sw *chain_tail;
        dma_cookie_t cookie;

        spin_lock_bh(&ioat->desc_lock);
        /* cookie incr and addition to used_list must be atomic */
        cookie = c->cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        c->cookie = cookie;
        tx->cookie = cookie;
        dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

        /* write address into NextDescriptor field of last desc in chain */
        first = to_ioat_desc(desc->tx_list.next);
        chain_tail = to_ioat_desc(ioat->used_desc.prev);
        /* make descriptor updates globally visible before chaining */
        wmb();
        chain_tail->hw->next = first->txd.phys;
        list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
        dump_desc_dbg(ioat, chain_tail);
        dump_desc_dbg(ioat, first);

        if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        ioat->active += desc->hw->tx_cnt;
        ioat->pending += desc->hw->tx_cnt;
        if (ioat->pending >= ioat_pending_level)
                __ioat1_dma_memcpy_issue_pending(ioat);
        spin_unlock_bh(&ioat->desc_lock);

        return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;

        ioatdma_device = ioat->base.device;
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioatdma_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));

        INIT_LIST_HEAD(&desc_sw->tx_list);
        dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
        desc_sw->txd.tx_submit = ioat1_tx_submit;
        desc_sw->hw = desc;
        desc_sw->txd.phys = phys;
        set_desc_id(desc_sw, -1);

        return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
                 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *desc;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat->free_desc))
                return ioat->desccount;

        /* Setup register to interrupt and write completion status on error */
        writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < ioat_initial_desc_count; i++) {
                desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
                if (!desc) {
                        dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
                        break;
                }
                set_desc_id(desc, i);
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat->desc_lock);
        ioat->desccount = i;
        list_splice(&tmp_list, &ioat->free_desc);
        spin_unlock_bh(&ioat->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        chan->completion = pci_pool_alloc(chan->device->completion_pool,
                                          GFP_KERNEL, &chan->completion_dma);
        memset(chan->completion, 0, sizeof(*chan->completion));
        writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
               chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) chan->completion_dma) >> 32,
               chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        tasklet_enable(&chan->cleanup_task);
        ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
        dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
                __func__, ioat->desccount);
        return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *ioatdma_device = chan->device;
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;

        /* Before freeing channel resources first check
         * if they have been previously allocated for this channel.
         */
        if (ioat->desccount == 0)
                return;

        tasklet_disable(&chan->cleanup_task);
        del_timer_sync(&chan->timer);
        ioat1_cleanup(ioat);

        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
               chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
        mdelay(100);

        spin_lock_bh(&ioat->desc_lock);
        list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
                dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
                        __func__, desc_id(desc));
                dump_desc_dbg(ioat, desc);
                in_use_descs++;
                list_del(&desc->node);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->txd.phys);
                kfree(desc);
        }
        list_for_each_entry_safe(desc, _desc,
                                 &ioat->free_desc, node) {
                list_del(&desc->node);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->txd.phys);
                kfree(desc);
        }
        spin_unlock_bh(&ioat->desc_lock);

        pci_pool_free(ioatdma_device->completion_pool,
                      chan->completion,
                      chan->completion_dma);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);

        chan->last_completion = 0;
        chan->completion_dma = 0;
        ioat->pending = 0;
        ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
        struct ioat_desc_sw *new;

        if (!list_empty(&ioat->free_desc)) {
                new = to_ioat_desc(ioat->free_desc.next);
                list_del(&new->node);
        } else {
                /* try to get another desc */
                new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
                if (!new) {
                        dev_err(to_dev(&ioat->base), "alloc failed\n");
                        return NULL;
                }
        }
        dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
                __func__, desc_id(new));
        prefetch(new->hw);
        return new;
}

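/**
 * ioat1_dma_prep_memcpy - build a descriptor chain for a memcpy
 * @c: DMA channel handle
 * @dma_dest: mapped destination address
 * @dma_src: mapped source address
 * @len: total length of the copy
 * @flags: dmaengine descriptor flags (e.g. DMA_PREP_INTERRUPT)
 *
 * The copy is split into hardware descriptors of at most ioat->xfercap
 * bytes each; only the final descriptor requests a completion write,
 * and the cookie is assigned to the chain as a whole at submit time.
 */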
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                      dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);
        struct ioat_desc_sw *desc;
        size_t copy;
        LIST_HEAD(chain);
        dma_addr_t src = dma_src;
        dma_addr_t dest = dma_dest;
        size_t total_len = len;
        struct ioat_dma_descriptor *hw = NULL;
        int tx_cnt = 0;

        spin_lock_bh(&ioat->desc_lock);
        desc = ioat1_dma_get_next_descriptor(ioat);
        do {
                if (!desc)
                        break;

                tx_cnt++;
                copy = min_t(size_t, len, ioat->xfercap);

                hw = desc->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dest;

                list_add_tail(&desc->node, &chain);

                len -= copy;
                dest += copy;
                src += copy;
                if (len) {
                        struct ioat_desc_sw *next;

                        async_tx_ack(&desc->txd);
                        next = ioat1_dma_get_next_descriptor(ioat);
                        hw->next = next ? next->txd.phys : 0;
                        dump_desc_dbg(ioat, desc);
                        desc = next;
                } else
                        hw->next = 0;
        } while (len);

        if (!desc) {
                struct ioat_chan_common *chan = &ioat->base;

                dev_err(to_dev(chan),
                        "chan%d - get_next_desc failed\n", chan_num(chan));
                list_splice(&chain, &ioat->free_desc);
                spin_unlock_bh(&ioat->desc_lock);
                return NULL;
        }
        spin_unlock_bh(&ioat->desc_lock);

        desc->txd.flags = flags;
        desc->len = total_len;
        list_splice(&chain, &desc->tx_list);
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.compl_write = 1;
        hw->tx_cnt = tx_cnt;
        dump_desc_dbg(ioat, desc);

        return &desc->txd;
}
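
/*
 * A minimal sketch of how a dmaengine client drives the path above
 * (illustrative only: "chan" is assumed to be a channel on this device,
 * "dst" and "src" two addresses the caller has already DMA-mapped):
 *
 *      struct dma_async_tx_descriptor *tx;
 *      dma_cookie_t cookie;
 *
 *      tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *                                                DMA_PREP_INTERRUPT);
 *      if (tx) {
 *              cookie = tx->tx_submit(tx);     (ioat1_tx_submit above)
 *              chan->device->device_issue_pending(chan);
 *      }
 */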
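/* channel tasklet: reap finished descriptors outside of hard-irq context */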
static void ioat1_cleanup_event(unsigned long data)
{
        struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);

        ioat1_cleanup(ioat);
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

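/*
 * Unmap the source and destination buffers of a completed copy,
 * honoring the DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags set by the client.
 */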
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
                    size_t len, struct ioat_dma_descriptor *hw)
{
        struct pci_dev *pdev = chan->device->pdev;
        size_t offset = len - hw->size;

        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
                ioat_unmap(pdev, hw->dst_addr - offset, len,
                           PCI_DMA_FROMDEVICE, flags, 1);

        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
                ioat_unmap(pdev, hw->src_addr - offset, len,
                           PCI_DMA_TODEVICE, flags, 0);
}

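/*
 * Read the address of the last completed descriptor from the channel's
 * completion writeback area, and complain if the channel has halted.
 */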
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
        unsigned long phys_complete;
        u64 completion;

        completion = *chan->completion;
        phys_complete = ioat_chansts_to_addr(completion);

        dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
                (unsigned long long) phys_complete);

        if (is_ioat_halted(completion)) {
                u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
                        chanerr);

                /* TODO do something to salvage the situation */
        }

        return phys_complete;
}

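/*
 * Returns true (and re-arms the completion timer) when the hardware has
 * advanced past chan->last_completion, i.e. there is cleanup work to do.
 */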
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
                           unsigned long *phys_complete)
{
        *phys_complete = ioat_get_current_completion(chan);
        if (*phys_complete == chan->last_completion)
                return false;
        clear_bit(IOAT_COMPLETION_ACK, &chan->state);
        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        return true;
}

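/*
 * Walk used_desc up to the last completed descriptor: unmap buffers,
 * run client callbacks, and move acked descriptors back to free_desc.
 * Called with desc_lock and cleanup_lock held.
 */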
static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct list_head *_desc, *n;
        struct dma_async_tx_descriptor *tx;

        dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
                __func__, phys_complete);
        list_for_each_safe(_desc, n, &ioat->used_desc) {
                struct ioat_desc_sw *desc;

                prefetch(n);
                desc = list_entry(_desc, typeof(*desc), node);
                tx = &desc->txd;
                /*
                 * Incoming DMA requests may use multiple descriptors,
                 * due to exceeding xfercap, perhaps. If so, only the
                 * last one will have a cookie, and require unmapping.
                 */
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
                        chan->completed_cookie = tx->cookie;
                        tx->cookie = 0;
                        ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                        ioat->active -= desc->hw->tx_cnt;
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }

                if (tx->phys != phys_complete) {
                        /*
                         * a completed entry, but not the last, so clean
                         * up if the client is done with the descriptor
                         */
                        if (async_tx_test_ack(tx))
                                list_move_tail(&desc->node, &ioat->free_desc);
                } else {
                        /*
                         * last used desc. Do not remove, so we can
                         * append from it.
                         */

                        /* if nothing else is pending, cancel the
                         * completion timeout
                         */
                        if (n == &ioat->used_desc) {
                                dev_dbg(to_dev(chan),
                                        "%s cancel completion timeout\n",
                                        __func__);
                                clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                        }

                        /* TODO check status bits? */
                        break;
                }
        }

        chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        unsigned long phys_complete;

        prefetch(chan->completion);

        if (!spin_trylock_bh(&chan->cleanup_lock))
                return;

        if (!ioat_cleanup_preamble(chan, &phys_complete)) {
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        }

        if (!spin_trylock_bh(&ioat->desc_lock)) {
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        }

        __cleanup(ioat, phys_complete);

        spin_unlock_bh(&ioat->desc_lock);
        spin_unlock_bh(&chan->cleanup_lock);
}

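/*
 * Channel watchdog: restarts the chain after a pending reset, resets the
 * channel when an acknowledged completion has made no progress, and
 * otherwise records the current status and re-arms the timer.
 */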
static void ioat1_timer_event(unsigned long data)
{
        struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;

        dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

        spin_lock_bh(&chan->cleanup_lock);
        if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
                struct ioat_desc_sw *desc;

                spin_lock_bh(&ioat->desc_lock);

                /* restart active descriptors */
                desc = to_ioat_desc(ioat->used_desc.prev);
                ioat_set_chainaddr(ioat, desc->txd.phys);
                ioat_start(chan);

                ioat->pending = 0;
                set_bit(IOAT_COMPLETION_PENDING, &chan->state);
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                spin_unlock_bh(&ioat->desc_lock);
        } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
                unsigned long phys_complete;

                spin_lock_bh(&ioat->desc_lock);
                /* if we haven't made progress and we have already
                 * acknowledged a pending completion once, then be more
                 * forceful with a restart
                 */
                if (ioat_cleanup_preamble(chan, &phys_complete))
                        __cleanup(ioat, phys_complete);
                else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
                        ioat1_reset_channel(ioat);
                else {
                        u64 status = ioat_chansts(chan);

                        /* manually update the last completion address */
                        if (ioat_chansts_to_addr(status) != 0)
                                *chan->completion = status;

                        set_bit(IOAT_COMPLETION_ACK, &chan->state);
                        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                }
                spin_unlock_bh(&ioat->desc_lock);
        }
        spin_unlock_bh(&chan->cleanup_lock);
}

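/*
 * Poll for completion of a cookie; if it is not yet complete, run the
 * channel's cleanup routine and poll once more.
 */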
enum dma_status
ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
                     dma_cookie_t *done, dma_cookie_t *used)
{
        struct ioat_chan_common *chan = to_chan_common(c);
        struct ioatdma_device *device = chan->device;

        if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
                return DMA_SUCCESS;

        device->cleanup_fn((unsigned long) c);

        return ioat_is_complete(c, cookie, done, used);
}

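/*
 * Prime an idle channel with a NULL descriptor (a no-op the hardware
 * accepts) so the chain has a tail to append real work to, then point
 * the chain-address register at it and start the channel.
 */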
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_desc_sw *desc;
        struct ioat_dma_descriptor *hw;

        spin_lock_bh(&ioat->desc_lock);

        desc = ioat1_dma_get_next_descriptor(ioat);

        if (!desc) {
                dev_err(to_dev(chan),
                        "Unable to start null desc - get next desc failed\n");
                spin_unlock_bh(&ioat->desc_lock);
                return;
        }

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;
        async_tx_ack(&desc->txd);
        hw->next = 0;
        list_add_tail(&desc->node, &ioat->used_desc);
        dump_desc_dbg(ioat, desc);

        ioat_set_chainaddr(ioat, desc->txd.phys);
        ioat_start(chan);
        spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
        struct completion *cmp = dma_async_param;

        complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_device *dma = &device->common;
        struct device *dev = &device->pdev->dev;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        unsigned long flags;

        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(dev, "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }

        dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
        flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
                DMA_PREP_INTERRUPT;
        tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
                                                   IOAT_TEST_SIZE, flags);
        if (!tx) {
                dev_err(dev, "Self-test prep failed, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test setup failed, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
                                        != DMA_SUCCESS) {
                dev_err(dev, "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
                    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
                 "set ioat interrupt style: msix (default), "
                 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
        struct ioat_chan_common *chan;
        struct pci_dev *pdev = device->pdev;
        struct device *dev = &pdev->dev;
        struct msix_entry *msix;
        int i, j, msixcnt;
        int err = -EINVAL;
        u8 intrctrl = 0;

        if (!strcmp(ioat_interrupt_style, "msix"))
                goto msix;
        if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
                goto msix_single_vector;
        if (!strcmp(ioat_interrupt_style, "msi"))
                goto msi;
        if (!strcmp(ioat_interrupt_style, "intx"))
                goto intx;
        dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
        goto err_no_irq;

msix:
        /* The number of MSI-X vectors should equal the number of channels */
        msixcnt = device->common.chancnt;
        for (i = 0; i < msixcnt; i++)
                device->msix_entries[i].entry = i;

        err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
        if (err < 0)
                goto msi;
        if (err > 0)
                goto msix_single_vector;

        for (i = 0; i < msixcnt; i++) {
                msix = &device->msix_entries[i];
                chan = ioat_chan_by_index(device, i);
                err = devm_request_irq(dev, msix->vector,
                                       ioat_dma_do_interrupt_msix, 0,
                                       "ioat-msix", chan);
                if (err) {
                        for (j = 0; j < i; j++) {
                                msix = &device->msix_entries[j];
                                chan = ioat_chan_by_index(device, j);
                                devm_free_irq(dev, msix->vector, chan);
                        }
                        goto msix_single_vector;
                }
        }
        intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
        goto done;

msix_single_vector:
        msix = &device->msix_entries[0];
        msix->entry = 0;
        err = pci_enable_msix(pdev, device->msix_entries, 1);
        if (err)
                goto msi;

        err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
                               "ioat-msix", device);
        if (err) {
                pci_disable_msix(pdev);
                goto msi;
        }
        goto done;

msi:
        err = pci_enable_msi(pdev);
        if (err)
                goto intx;

        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
                               "ioat-msi", device);
        if (err) {
                pci_disable_msi(pdev);
                goto intx;
        }
        goto done;

intx:
        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
                               IRQF_SHARED, "ioat-intx", device);
        if (err)
                goto err_no_irq;

done:
        if (device->intr_quirk)
                device->intr_quirk(device);
        intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
        writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
        return 0;

err_no_irq:
        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
        dev_err(dev, "no usable interrupts\n");
        return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int __devinit ioat_probe(struct ioatdma_device *device)
{
        int err = -ENODEV;
        struct dma_device *dma = &device->common;
        struct pci_dev *pdev = device->pdev;
        struct device *dev = &pdev->dev;

        /* DMA coherent memory pool for DMA descriptor allocations */
        device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
                                           sizeof(struct ioat_dma_descriptor),
                                           64, 0);
        if (!device->dma_pool) {
                err = -ENOMEM;
                goto err_dma_pool;
        }

        device->completion_pool = pci_pool_create("completion_pool", pdev,
                                                  sizeof(u64), SMP_CACHE_BYTES,
                                                  SMP_CACHE_BYTES);

        if (!device->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }

        device->enumerate_channels(device);

        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma->dev = &pdev->dev;

        if (!dma->chancnt) {
                dev_err(dev, "channel enumeration error\n");
                goto err_setup_interrupts;
        }

        err = ioat_dma_setup_interrupts(device);
        if (err)
                goto err_setup_interrupts;

        err = device->self_test(device);
        if (err)
                goto err_self_test;

        return 0;

err_self_test:
        ioat_disable_interrupts(device);
err_setup_interrupts:
        pci_pool_destroy(device->completion_pool);
err_completion_pool:
        pci_pool_destroy(device->dma_pool);
err_dma_pool:
        return err;
}

int __devinit ioat_register(struct ioatdma_device *device)
{
        int err = dma_async_device_register(&device->common);

        if (err) {
                ioat_disable_interrupts(device);
                pci_pool_destroy(device->completion_pool);
                pci_pool_destroy(device->dma_pool);
        }

        return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
        struct pci_dev *pdev = device->pdev;
        u32 dmactrl;

        pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
        if (pdev->msi_enabled)
                dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
        else
                dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
        pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);

        return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
        struct ioat_dma_chan *ioat = to_ioat_chan(c);

        return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
        struct dma_device *dma = c->device;

        return sprintf(page, "copy%s%s%s%s%s%s\n",
                       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
                       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
                       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
                       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
                       dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
                       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
        struct dma_device *dma = c->device;
        struct ioatdma_device *device = to_ioatdma_device(dma);

        return sprintf(page, "%d.%d\n",
                       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
        &ring_size_attr.attr,
        &ring_active_attr.attr,
        &ioat_cap_attr.attr,
        &ioat_version_attr.attr,
        NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct ioat_sysfs_entry *entry;
        struct ioat_chan_common *chan;

        entry = container_of(attr, struct ioat_sysfs_entry, attr);
        chan = container_of(kobj, struct ioat_chan_common, kobj);

        if (!entry->show)
                return -EIO;
        return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
        .show = ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
        .sysfs_ops = &ioat_sysfs_ops,
        .default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
        struct dma_device *dma = &device->common;
        struct dma_chan *c;

        list_for_each_entry(c, &dma->channels, device_node) {
                struct ioat_chan_common *chan = to_chan_common(c);
                struct kobject *parent = &c->dev->device.kobj;
                int err;

                err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
                if (err) {
                        dev_warn(to_dev(chan),
                                 "sysfs init error (%d), continuing...\n", err);
                        kobject_put(&chan->kobj);
                        set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
                }
        }
}

void ioat_kobject_del(struct ioatdma_device *device)
{
        struct dma_device *dma = &device->common;
        struct dma_chan *c;

        list_for_each_entry(c, &dma->channels, device_node) {
                struct ioat_chan_common *chan = to_chan_common(c);

                if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
                        kobject_del(&chan->kobj);
                        kobject_put(&chan->kobj);
                }
        }
}

int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
        struct pci_dev *pdev = device->pdev;
        struct dma_device *dma;
        int err;

        device->intr_quirk = ioat1_intr_quirk;
        device->enumerate_channels = ioat1_enumerate_channels;
        device->self_test = ioat_dma_self_test;
        device->timer_fn = ioat1_timer_event;
        device->cleanup_fn = ioat1_cleanup_event;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
        dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
        dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
        dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
        dma->device_is_tx_complete = ioat_is_dma_complete;

        err = ioat_probe(device);
        if (err)
                return err;
        ioat_set_tcp_copy_break(4096);
        err = ioat_register(device);
        if (err)
                return err;
        ioat_kobject_add(device, &ioat1_ktype);

        if (dca)
                device->dca = ioat_dca_init(pdev, device->reg_base);

        return err;
}

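/*
 * Tear down in the reverse order of ioat_probe()/ioat_register():
 * interrupts first, then sysfs objects, the dmaengine registration, and
 * finally the descriptor and completion pools.
 */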
void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
        struct dma_device *dma = &device->common;

        ioat_disable_interrupts(device);

        ioat_kobject_del(device);

        dma_async_device_unregister(dma);

        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);

        INIT_LIST_HEAD(&dma->channels);
}