/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high priority and low priority) for TX and 2 channels
 * for RX. Data packets are transferred through the low-priority channels,
 * management packets through the high-priority channels.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"

void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

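/*
 * Each channel keeps its control blocks on a circular singly-linked list:
 * head_blk_ctl is the next block handed to the hardware (see
 * wcn36xx_dxe_tx_frame()) and tail_blk_ctl the next one to reap (see
 * reap_tx_dxes()).  The allocator below closes the circle by pointing
 * the last block back at the head.
 */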
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		spin_lock_init(&cur_ctl->skb_lock);
		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX enable, set TX rings-empty state */
	ret = wcn->ctrl_ops->smsm_change_state(
		WCN36XX_SMSM_WLAN_TX_ENABLE,
		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

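/*
 * Allocate the hardware descriptors for a channel as one coherent DMA
 * block and chain them through phy_next_l, so the DXE engine walks the
 * same circular ring as the control blocks above; descriptor i is tied
 * to control block i through cur_ctl->desc and desc_phy_addr.
 */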
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (0 == i) {
			cur_dxe->phy_next_l = 0;
		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

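/*
 * On TX, each frame occupies a descriptor pair: an even descriptor that
 * DMAs the buffer descriptor (BD) from the preallocated memory pool and
 * the odd descriptor after it that carries the skb payload.  Hence only
 * every second control block is given a BD chunk below:
 *
 *	ctl[0].bd -> chunk 0, ctl[1].bd = NULL,
 *	ctl[2].bd -> chunk 1, ctl[3].bd = NULL, ...
 */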
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer, the others
		 * point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	ctl->skb = skb;

	return 0;
}

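/*
 * RX buffers are preallocated for every descriptor on the ring, so the
 * hardware can DMA a received frame straight into the mapped skb.
 */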
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

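/*
 * Walk the TX ring from the tail and release every descriptor the
 * hardware has finished with.  A set VALID bit in desc->ctrl means the
 * descriptor is still owned by the DXE engine, so reaping stops there.
 */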
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Run the loop body at least once (do-while): when the ring is
	 * completely full, head and tail point to the same element, so a
	 * plain while loop would not make any iterations.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
			break;
		if (ctl->skb) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status was requested, so the frame
				 * can be freed here; otherwise it is kept
				 * until the TX status indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock(&ctl->skb_lock);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock(&ctl->skb_lock);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
		 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);
		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);
	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

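/*
 * Hand all completed RX descriptors (VALID bit cleared) up the stack.
 * A replacement skb is mapped first; if that allocation fails, the old
 * buffer stays on the ring and is reused for the next DMA, so a frame
 * is dropped rather than the ring corrupted under memory pressure.
 */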
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int ret = 0, int_mask;
	u32 value;

	if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
		value = WCN36XX_DXE_CTRL_RX_L;
		int_mask = WCN36XX_DXE_INT_CH1_MASK;
	} else {
		value = WCN36XX_DXE_CTRL_RX_H;
		int_mask = WCN36XX_DXE_INT_CH3_MASK;
	}

	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
		if (0 == ret) {
			/* new skb allocation ok. Use the new one and queue
			 * the old one to network system.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		} /* else keep old skb not submitted and use it for rx DMA */

		dxe->ctrl = value;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);

	ch->head_blk_ctl = ctl;
	return 0;
}

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

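/*
 * Every TX descriptor pair consumes one fixed-size BD chunk from these
 * pools.  The chunk-size formula below is unexplained upstream (the
 * comments say to ask QC); presumably it pads WCN36XX_BD_CHUNK_SIZE to
 * keep the chunks 8-byte aligned with some headroom.
 */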
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

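/*
 * Transmit path: a frame is submitted as a descriptor pair, the head
 * descriptor DMA-ing the prebuilt BD from the channel's pool chunk and
 * the next one carrying the skb payload.  Only once both are set up is
 * the hardware kicked, either through the channel control register or,
 * if the chip may be asleep in BMPS, through the SMSM bus.
 */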
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->head_blk_ctl;

	spin_lock(&ctl->next->skb_lock);

	/*
	 * If the skb is not NULL, we have reached the tail of the ring,
	 * hence the ring is full. Stop the queues to let mac80211 back
	 * off until the ring has an empty slot again.
	 */
	if (NULL != ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock(&ctl->next->skb_lock);
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}
	spin_unlock(&ctl->next->skb_lock);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc->src_addr_l = dma_map_single(wcn->dev,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* set dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected and trying to send a data frame, the chip can be
	 * in sleep mode; writing to the register will then not wake it up.
	 * Instead, notify the chip about the new frame through the SMSM bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		wcn->ctrl_ops->smsm_change_state(
			  0,
			  WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

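/*
 * Bring-up sequence: reset the engine, route the DXE interrupts for the
 * chip generation at hand (Riva and Pronto use different CCU select
 * registers), then program each channel's descriptor ring, work queue
 * address and interrupt mask before finally requesting the host IRQs.
 */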
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}