/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
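/* Local helper: like ieee80211_hdrlen(), but validated against the buffer
 * length first, so a truncated or corrupt segment cannot send us reading
 * past the end of the data.
 */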
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}
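/* Copy path: allocate a fresh skb and copy the whole segment into it,
 * dropping the two pad bytes the hardware inserts after the 802.11
 * header when MT_RXINFO_L2PAD is set.
 */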
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			u8 *data, u32 seg_len)
{
	struct sk_buff *skb;
	u32 true_len;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
		seg_len -= 2; /* hw inserts 2 pad bytes after the header */

	skb = alloc_skb(seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);

		memcpy(skb_put(skb, hdr_len), data, hdr_len);
		data += hdr_len + 2;
		seg_len -= hdr_len;
	}

	memcpy(skb_put(skb, seg_len), data, seg_len);

	true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi);
	skb_trim(skb, true_len);

	return skb;
}
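/* Paged path: copy only the head of the frame into the skb and attach
 * the rest of the payload as a page fragment, avoiding a full memcpy
 * for large frames.
 */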
static struct sk_buff *
mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
			      struct mt7601u_rxwi *rxwi, void *data,
			      u32 seg_len, u32 truesize, struct page *p)
{
	unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
	unsigned int true_len, copy, frag;
	struct sk_buff *skb;

	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		memcpy(skb_put(skb, hdr_len), data, hdr_len);
		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	memcpy(skb_put(skb, copy), data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;
}
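/* Parse a single DMA segment: strip the DMA header and trailing FCE
 * info, extract the RXWI and hand the resulting frame to mac80211.
 */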
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p, bool paged)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information, we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	if (paged)
		skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len,
						    truesize, p);
	else
		skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len);
	if (!skb)
		return;

	ieee80211_rx_ni(dev->hw, skb);
}
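/* Each RX urb may carry several aggregated segments, each prefixed by a
 * DMA header whose first 16 bits are the little-endian payload length.
 * Returns the total length of the next segment (headers included), or 0
 * when the buffer is exhausted or the length field looks bogus.
 */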
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
			  sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}
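/* Process all segments in a completed RX buffer. When the buffer holds
 * a reasonable amount of data, try to allocate a replacement page up
 * front so payloads can be attached as page fragments instead of being
 * copied.
 */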
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	bool paged = true;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len < 512) {
		paged = false;
	} else {
		new_p = dev_alloc_pages(MT_RX_ORDER);
		if (!new_p)
			paged = false;
	}

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, paged);

	if (new_p) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		e->p = new_p;
	}
}
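/* Pop the oldest completed entry off the RX ring, or return NULL if
 * nothing is pending. Runs in the tasklet; the lock is shared with the
 * urb completion handler.
 */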
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}
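/* RX urb completion: runs in interrupt context, so just advance the
 * ring and let the tasklet do the actual frame processing.
 */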
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}
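/* Bottom half of the RX path: drain pending entries, process each one
 * and immediately resubmit its buffer to the device.
 */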
static void mt7601u_rx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}
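/* TX urb completion: report the frame's status to mac80211, wake the
 * queue once it drains back below 7/8 of its capacity, and schedule a
 * delayed read of the hardware TX statistics.
 */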
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	trace_mt_tx_dma_done(dev, skb);

	mt7601u_tx_status(dev, skb);

	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;

	if (urb->status)
		goto out;

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
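/* Queue a frame on the bulk OUT endpoint @ep. Stops the corresponding
 * mac80211 queue when the ring fills up; mt7601u_complete_tx() wakes it
 * again as entries are consumed.
 */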
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}
/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}
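/* Poison all RX urbs so no further completions can arrive. The lock is
 * dropped around usb_poison_urb() because it may sleep waiting for an
 * in-flight urb to finish.
 */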
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	for (i = 0; i < dev->rx_q.entries; i++) {
		int next = dev->rx_q.end;

		spin_unlock_irqrestore(&dev->rx_lock, flags);
		usb_poison_urb(dev->rx_q.e[next].urb);
		spin_lock_irqsave(&dev->rx_lock, flags);
	}

	spin_unlock_irqrestore(&dev->rx_lock, flags);
}
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}
static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}
static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	WARN_ON(q->used);

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		usb_free_urb(q->e[i].urb);
	}
}
static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}
static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}
static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret = -ENOMEM;

	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}
void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);
}