/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc32 *)desc;
}
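/* Program one 32-bit hardware descriptor. The topmost bits of the bus
 * address (SSB_DMA_TRANSLATION_MASK) cannot be handed to the device
 * directly: they are moved into the ADDREXT field of the control word,
 * and the SSB routing ("translation") bits are ORed into the address
 * word instead. The control word also carries the buffer byte count and
 * the DTABLEEND/FRAMESTART/FRAMEEND/IRQ flags.
 */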
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}
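/* Ring bookkeeping helpers. A ring is a fixed array of nr_slots
 * descriptors; current_slot is the most recently used index, used_slots
 * counts the outstanding ones, and next_slot()/prev_slot() wrap around
 * the end of the array.
 */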
static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
				struct b43legacy_wldev *dev,
				int queue_priority)
{
	struct b43legacy_dmaring *ring;

	/*FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

	/*FIXME: have only one queue, for now */
	return 0;

	return idx_to_prio[ring->index];
}


static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			  controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase) {
		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
			     " failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}
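/* Attach a freshly allocated skb to an RX descriptor. If the first
 * mapping lands outside the window the DMA engine can address (see
 * b43legacy_dma_mapping_error() above), the buffer is reallocated with
 * GFP_DMA so it comes from ZONE_DMA, and the mapping is retried once.
 */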
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			 & B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}
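/* Probe which DMA mask the hardware supports: write the ADDREXT mask
 * bits to the TX control register of the first controller and read them
 * back. If the bits stick, the core understands address extensions and
 * can reach a full 32-bit address space; otherwise it is limited to
 * 30 bits.
 */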
static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			mmio_base + B43legacy_DMA32_TXCTL,
			B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
			     "the required %u-bit DMA mask\n",
			     (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			      "bit\n",
			      (unsigned int)dma_mask_to_engine_type(orig_mask),
			      (unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
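/* Top-level DMA bring-up: probe the supported mask, program it, cache
 * the SSB translation bits, then create the six TX rings and one or two
 * RX rings. RX ring 3 exists only on core revisions < 5, where it
 * delivers hardware TX status reports.
 */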
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
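/* Queue one frame for transmission. Each frame occupies SLOTS_PER_PACKET
 * (two) ring slots: the first carries the device TX header from
 * txhdr_cache, the second the frame payload itself. If the payload
 * cannot be mapped at a device-reachable address, it is copied into a
 * GFP_DMA bounce buffer and *in_skb is updated to point at the
 * replacement skb.
 */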
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate any
	 * pointers into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}
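/* Handle a TX status report from the device. The cookie identifies the
 * ring and the first slot of the frame; all slots up to and including
 * the one marked is_last_fragment are unmapped and released, and
 * mac80211 is informed of the transmission outcome.
 */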
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43legacy_WARN_ON(!ring->tx);
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rc's value with the retry count actually performed
			 * by the hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = false;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}

	spin_unlock(&ring->lock);
}
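/* Process one received descriptor buffer. Ring 3 is special: it carries
 * hardware TX status reports rather than frames (it is only set up for
 * core revisions < 5, see b43legacy_dma_init()), so its buffers are
 * handed to b43legacy_handle_hwtxstatus() and recycled. For real frames
 * the skb is detached, a fresh buffer is hooked into the descriptor, and
 * the frame is passed up to b43legacy_rx().
 */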
static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}