/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*******************************************************************************
 * Communicates with the dongle by using dcmd codes.
 * For certain dcmd codes, the dongle interprets string data from the host.
 ******************************************************************************/

#include <linux/types.h>
#include <linux/netdevice.h>

#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
#include "bus.h"
#include "tracepoint.h"

#define MSGBUF_IOCTL_RESP_TIMEOUT		2000

#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

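/* All host/dongle messages exchanged over the rings start with
 * msgbuf_common_hdr and are little endian on the wire, so the __le16/__le32
 * fields below go through cpu_to_le*()/le*_to_cpu() on the host side.
 */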
struct msgbuf_common_hdr {
	u8				msgtype;
	u8				ifidx;
	u8				flags;
	u8				rsvd0;
	__le32				request_id;
};

struct msgbuf_buf_addr {
	__le32				low_addr;
	__le32				high_addr;
};

struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr	msg;
	__le32				cmd;
	__le16				trans_id;
	__le16				input_buf_len;
	__le16				output_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		req_buf_addr;
	__le32				rsvd1[2];
};

struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr	msg;
	u8				txhdr[ETH_HLEN];
	u8				flags;
	u8				seg_cnt;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
	__le16				metadata_buf_len;
	__le16				data_len;
	__le32				rsvd0;
};

struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr	msg;
	__le16				metadata_buf_len;
	__le16				data_buf_len;
	__le32				rsvd0;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
};

struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr	msg;
	__le16				host_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		host_buf_addr;
	__le32				rsvd1[4];
};

struct msgbuf_completion_hdr {
	__le16				status;
	__le16				flow_ring_id;
};

struct msgbuf_rx_event {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				event_data_len;
	__le16				seqnum;
	__le16				rsvd0[4];
};

struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				resp_len;
	__le16				trans_id;
	__le32				cmd;
	__le32				rsvd0;
};

struct msgbuf_tx_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				tx_status;
};

struct msgbuf_rx_complete {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				data_len;
	__le16				data_offset;
	__le16				flags;
	__le32				rx_status_0;
	__le32				rx_status_1;
	__le32				rsvd0;
};

struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr	msg;
	u8				da[ETH_ALEN];
	u8				sa[ETH_ALEN];
	u8				tid;
	u8				if_flags;
	__le16				flow_ring_id;
	u8				tc;
	u8				priority;
	__le16				int_vector;
	__le16				max_items;
	__le16				len_item;
	struct msgbuf_buf_addr		flow_ring_addr;
};

struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr	msg;
	__le16				flow_ring_id;
	__le16				reason;
	__le32				rsvd0[7];
};

struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

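/* Flowring creation is requested from the tx path in atomic context, but
 * setting a ring up requires DMA-coherent memory. The parameters are
 * therefore captured in a work item and handled from process context by
 * flowring_work.
 */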
struct brcmf_msgbuf_work_item {
	struct list_head queue;
	u32 flowid;
	int ifidx;
	u8 sa[ETH_ALEN];
	u8 da[ETH_ALEN];
};

struct brcmf_msgbuf {
	struct brcmf_pub *drvr;

	struct brcmf_commonring **commonrings;
	struct brcmf_commonring **flowrings;
	dma_addr_t *flowring_dma_handle;
	u16 nrof_flowrings;

	u16 rx_dataoffset;
	u32 max_rxbufpost;
	u16 rx_metadata_offset;
	u32 rxbufpost;

	u32 max_ioctlrespbuf;
	u32 cur_ioctlrespbuf;
	u32 max_eventbuf;
	u32 cur_eventbuf;

	void *ioctbuf;
	dma_addr_t ioctbuf_handle;
	u32 ioctbuf_phys_hi;
	u32 ioctbuf_phys_lo;
	int ioctl_resp_status;
	u32 ioctl_resp_ret_len;
	u32 ioctl_resp_pktid;

	u16 data_seq_no;
	u16 ioctl_seq_no;
	u32 reqid;
	wait_queue_head_t ioctl_resp_wait;
	bool ctl_completed;

	struct brcmf_msgbuf_pktids *tx_pktids;
	struct brcmf_msgbuf_pktids *rx_pktids;
	struct brcmf_flowring *flow;

	struct workqueue_struct *txflow_wq;
	struct work_struct txflow_work;
	unsigned long *flow_map;
	unsigned long *txstatus_done_map;

	struct work_struct flowring_work;
	spinlock_t flowring_work_lock;
	struct list_head work_queue;
};

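/* Packets handed to the dongle are referenced by a 32-bit packet id carried
 * in the ring messages. These tables map an id back to its skb and DMA
 * mapping; tx and rx use separate tables sized NR_TX_PKTIDS/NR_RX_PKTIDS.
 */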
struct brcmf_msgbuf_pktid {
	atomic_t  allocated;
	u16 data_offset;
	struct sk_buff *skb;
	dma_addr_t physaddr;
};

struct brcmf_msgbuf_pktids {
	u32 array_size;
	u32 last_allocated_idx;
	enum dma_data_direction direction;
	struct brcmf_msgbuf_pktid *array;
};

/* dma flushing needs implementation for mips and arm platforms. Should
 * be put in util. Note, this is not real flushing. It is virtual non
 * cached memory. Only write buffers should have to be drained. Though
 * this may be different depending on platform.
 */
#define brcmf_dma_flush(addr, len)
#define brcmf_dma_invalidate_cache(addr, len)


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);

static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,
			 enum dma_data_direction direction)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktids *pktids;

	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
	if (!pktids) {
		kfree(array);
		return NULL;
	}
	pktids->direction = direction;
	pktids->array = array;
	pktids->array_size = nr_array_entries;

	return pktids;
}

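/* Map the skb for DMA and claim a free slot in the pktid array. The search
 * starts just past the last allocated index and wraps around; a slot is
 * claimed with atomic_cmpxchg() so concurrent callers cannot pick the same
 * id. Returns -ENOMEM if the mapping fails or all ids are in use.
 */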
static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
			 struct brcmf_msgbuf_pktids *pktids,
			 struct sk_buff *skb, u16 data_offset,
			 dma_addr_t *physaddr, u32 *idx)
{
	struct brcmf_msgbuf_pktid *array;
	u32 count;

	array = pktids->array;

	*physaddr = dma_map_single(dev, skb->data + data_offset,
				   skb->len - data_offset, pktids->direction);

	if (dma_mapping_error(dev, *physaddr)) {
		brcmf_err("dma_map_single failed !!\n");
		return -ENOMEM;
	}

	*idx = pktids->last_allocated_idx;

	count = 0;
	do {
		(*idx)++;
		if (*idx == pktids->array_size)
			*idx = 0;
		if (array[*idx].allocated.counter == 0)
			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
				break;
		count++;
	} while (count < pktids->array_size);

	if (count == pktids->array_size)
		return -ENOMEM;

	array[*idx].data_offset = data_offset;
	array[*idx].physaddr = *physaddr;
	array[*idx].skb = skb;

	pktids->last_allocated_idx = *idx;

	return 0;
}


static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}


static void
brcmf_msgbuf_release_array(struct device *dev,
			   struct brcmf_msgbuf_pktids *pktids)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktid *pktid;
	u32 count;

	array = pktids->array;
	count = 0;
	do {
		if (array[count].allocated.counter) {
			pktid = &array[count];
			dma_unmap_single(dev, pktid->physaddr,
					 pktid->skb->len - pktid->data_offset,
					 pktids->direction);
			brcmu_pkt_buf_free_skb(pktid->skb);
		}
		count++;
	} while (count < pktids->array_size);

	kfree(array);
	kfree(pktids);
}


static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
{
	if (msgbuf->rx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->rx_pktids);
	if (msgbuf->tx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->tx_pktids);
}

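/* Submit an ioctl request to the dongle. The request header goes onto the
 * control submit ring; the payload itself is copied into the preallocated
 * DMA-coherent ioctbuf, whose bus address travels in the message. The
 * completion arrives asynchronously as MSGBUF_TYPE_IOCTL_CMPLT.
 */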
static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	struct msgbuf_ioctl_req_hdr *request;
	u16 buf_len;
	void *ret_ptr;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return -ENOMEM;
	}

	msgbuf->reqid++;

	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	request->msg.ifidx = (u8)ifidx;
	request->msg.flags = 0;
	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
	request->cmd = cpu_to_le32(cmd);
	request->output_buf_len = cpu_to_le16(len);
	request->trans_id = cpu_to_le16(msgbuf->reqid);

	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
	request->input_buf_len = cpu_to_le16(buf_len);
	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
	if (buf)
		memcpy(msgbuf->ioctbuf, buf, buf_len);
	else
		memset(msgbuf->ioctbuf, 0, buf_len);
	brcmf_dma_flush(msgbuf->ioctbuf, buf_len);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);

	return err;
}

static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT));
}


static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
	if (waitqueue_active(&msgbuf->ioctl_resp_wait)) {
		msgbuf->ctl_completed = true;
		wake_up(&msgbuf->ioctl_resp_wait);
	}
}

static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
				   uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct sk_buff *skb = NULL;
	int timeout;
	int err;

	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
	msgbuf->ctl_completed = false;
	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
	if (err)
		return err;

	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
	if (!timeout) {
		brcmf_err("Timeout on response for query command\n");
		return -EIO;
	}

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids,
				     msgbuf->ioctl_resp_pktid);
	if (msgbuf->ioctl_resp_ret_len != 0) {
		if (!skb) {
			brcmf_err("Invalid packet id idx recv'd %d\n",
				  msgbuf->ioctl_resp_pktid);
			return -EBADF;
		}
		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
				       len : msgbuf->ioctl_resp_ret_len);
	}
	brcmu_pkt_buf_free_skb(skb);

	return msgbuf->ioctl_resp_status;
}

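/* Every msgbuf ioctl produces a completion carrying response data, so a set
 * can simply reuse the query path.
 */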
static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
}

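/* msgbuf carries no per-packet protocol header on the data path, so there is
 * nothing to pull; this stub only satisfies the proto interface.
 */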
static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				u8 *ifidx, struct sk_buff *skb)
{
	return -ENODEV;
}

static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	u32 dma_sz;
	void *dma_buf;

	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
			  msgbuf->flowring_dma_handle[flowid]);

	brcmf_flowring_delete(msgbuf->flow, flowid);
}

static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
{
	struct brcmf_msgbuf_work_item *work = NULL;
	ulong flags;

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	if (!list_empty(&msgbuf->work_queue)) {
		work = list_first_entry(&msgbuf->work_queue,
					struct brcmf_msgbuf_work_item, queue);
		list_del(&work->queue);
	}
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);

	return work;
}

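/* Allocate the DMA-coherent ring memory for a queued flowring request and
 * send MSGBUF_TYPE_FLOW_RING_CREATE to the dongle. Note the on-wire ring id
 * is offset by BRCMF_NROF_H2D_COMMON_MSGRINGS, since the common rings occupy
 * the first ids.
 */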
static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_msgbuf_work_item *work)
{
	struct msgbuf_tx_flowring_create_req *create;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	u64 address;
	int err;

	flowid = work->flowid;
	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_KERNEL);
	if (!dma_buf) {
		brcmf_err("dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = work->ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
	memcpy(create->sa, work->sa, ETH_ALEN);
	memcpy(create->da, work->da, ETH_ALEN);
	address = (u64)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, work->da, create->tid, work->ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		brcmf_err("Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}

static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *create;

	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);

	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
		kfree(create);
	}
}

static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
					struct sk_buff *skb)
{
	struct brcmf_msgbuf_work_item *create;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	ulong flags;

	create = kzalloc(sizeof(*create), GFP_ATOMIC);
	if (create == NULL)
		return BRCMF_FLOWRING_INVALID_ID;

	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
				       skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		kfree(create);
		return flowid;
	}

	create->flowid = flowid;
	create->ifidx = ifidx;
	memcpy(create->sa, eh->h_source, ETH_ALEN);
	memcpy(create->da, eh->h_dest, ETH_ALEN);

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	list_add_tail(&create->queue, &msgbuf->work_queue);
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
	schedule_work(&msgbuf->flowring_work);

	return flowid;
}

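/* Drain queued tx packets for one flowring onto its ring. Doorbell writes
 * are batched: count starts at CNT2 - CNT1, so the first
 * brcmf_commonring_write_complete() fires after CNT1 (32) packets and
 * subsequent ones every CNT2 (96), with a final kick for any remainder.
 */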
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
{
	struct brcmf_flowring *flow = msgbuf->flow;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 count;
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u32 pktid;
	struct msgbuf_tx_msghdr *tx_msghdr;
	u64 address;

	commonring = msgbuf->flowrings[flowid];
	if (!brcmf_commonring_write_available(commonring))
		return;

	brcmf_commonring_lock(commonring);

	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
	while (brcmf_flowring_qlen(flow, flowid)) {
		skb = brcmf_flowring_dequeue(flow, flowid);
		if (skb == NULL) {
			brcmf_err("No SKB, but qlen %d\n",
				  brcmf_flowring_qlen(flow, flowid));
			break;
		}
		skb_orphan(skb);
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->tx_pktids, skb, ETH_HLEN,
					     &physaddr, &pktid)) {
			brcmf_flowring_reinsert(flow, flowid, skb);
			brcmf_err("No PKTID available !!\n");
			break;
		}
		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
		if (!ret_ptr) {
			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
					       msgbuf->tx_pktids, pktid);
			brcmf_flowring_reinsert(flow, flowid, skb);
			break;
		}
		count++;

		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
		tx_msghdr->msg.request_id = cpu_to_le32(pktid);
		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
		tx_msghdr->flags |= (skb->priority & 0x07) <<
				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
		tx_msghdr->seg_cnt = 1;
		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
		address = (u64)physaddr;
		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
		tx_msghdr->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		tx_msghdr->metadata_buf_len = 0;
		tx_msghdr->metadata_buf_addr.high_addr = 0;
		tx_msghdr->metadata_buf_addr.low_addr = 0;
		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
			brcmf_commonring_write_complete(commonring);
			count = 0;
		}
	}
	if (count)
		brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
}

static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
{
	struct brcmf_msgbuf *msgbuf;
	u32 flowid;

	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) {
		clear_bit(flowid, msgbuf->flow_map);
		brcmf_msgbuf_txflow(msgbuf, flowid);
	}
}


static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid)
{
	set_bit(flowid, msgbuf->flow_map);
	queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}

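/* Tx entry point from the protocol layer. Each (destination, priority, ifidx)
 * tuple maps to a flowring; if none exists yet one is created, and the packet
 * stays queued on the flowring until the dongle confirms ring creation.
 */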
static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
			       u8 offset, struct sk_buff *skb)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_flowring *flow = msgbuf->flow;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;

	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
		if (flowid == BRCMF_FLOWRING_INVALID_ID)
			return -ENOMEM;
	}
	brcmf_flowring_enqueue(flow, flowid, skb);
	brcmf_msgbuf_schedule_txdata(msgbuf, flowid);

	return 0;
}

static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}


static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}

static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	msgbuf->ioctl_resp_status =
			(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}

static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id);
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb) {
		brcmf_err("Invalid packet id idx recv'd %d\n", idx);
		return;
	}

	set_bit(flowid, msgbuf->txstatus_done_map);

	brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
}

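/* Hand the dongle empty receive buffers. Each posted skb is described by a
 * MSGBUF_TYPE_RXBUF_POST message carrying its packet id and bus address;
 * when rx_metadata_offset is set, the head of the buffer is reserved for
 * metadata and the data address is advanced past it.
 */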
static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			address = (u64)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}

static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
{
	u32 fillbufs;
	u32 retcount;

	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;

	while (fillbufs) {
		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
		if (!retcount)
			break;
		msgbuf->rxbufpost += retcount;
		fillbufs -= retcount;
	}
}


static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
	msgbuf->rxbufpost -= rxcnt;
	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}

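/* Post buffers for ioctl responses or firmware events on the control submit
 * ring; the two cases differ only in the msgtype written into the post.
 */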
static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}

static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
	msgbuf->cur_ioctlrespbuf += count;
}


static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
	msgbuf->cur_eventbuf += count;
}

static void
brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
		    u8 ifidx)
{
	struct brcmf_if *ifp;

	/* The ifidx is the index used to map to the matching netdev/ifp. For
	 * events this is easy because the event carries the bssidx, which
	 * maps 1-on-1 to the netdev/ifp. For data frames a raw ifidx is
	 * received instead. bssidx 1 is used for p2p0 and no data can be
	 * received or transmitted on it. Therefore bssidx is ifidx + 1 if
	 * ifidx > 0.
	 */
	if (ifidx)
		(ifidx)++;
	ifp = msgbuf->drvr->iflist[ifidx];
	if (!ifp || !ifp->ndev) {
		brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
		brcmu_pkt_buf_free_skb(skb);
		return;
	}
	brcmf_netif_rx(ifp, skb);
}

static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_rx_event *event;
	u32 idx;
	u16 buflen;
	struct sk_buff *skb;

	event = (struct msgbuf_rx_event *)buf;
	idx = le32_to_cpu(event->msg.request_id);
	buflen = le16_to_cpu(event->event_data_len);

	if (msgbuf->cur_eventbuf)
		msgbuf->cur_eventbuf--;
	brcmf_msgbuf_rxbuf_event_post(msgbuf);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
}


static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_rx_complete *rx_complete;
	struct sk_buff *skb;
	u16 data_offset;
	u16 buflen;
	u32 idx;

	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

	rx_complete = (struct msgbuf_rx_complete *)buf;
	data_offset = le16_to_cpu(rx_complete->data_offset);
	buflen = le16_to_cpu(rx_complete->data_len);
	idx = le32_to_cpu(rx_complete->msg.request_id);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (data_offset)
		skb_pull(skb, data_offset);
	else if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
}

static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct msgbuf_flowring_create_resp *flowring_create_resp;
	u16 status;
	u16 flowid;

	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;

	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
	status = le16_to_cpu(flowring_create_resp->compl_hdr.status);

	if (status) {
		brcmf_err("Flowring creation failed, code %d\n", status);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
		  status);

	brcmf_flowring_open(msgbuf->flow, flowid);

	brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
}


static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
	u16 status;
	u16 flowid;

	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;

	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
	status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);

	if (status) {
		brcmf_err("Flowring deletion failed, code %d\n", status);
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
		  status);

	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}

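/* Dispatch a single D2H message to its handler based on msgtype. */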
static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}

static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_commonring *commonring)
{
	void *buf;
	u16 count;

again:
	buf = brcmf_commonring_get_read_ptr(commonring, &count);
	if (buf == NULL)
		return;

	while (count) {
		brcmf_msgbuf_process_msgtype(msgbuf,
					     buf + msgbuf->rx_dataoffset);
		buf += brcmf_commonring_len_item(commonring);
		count--;
	}
	brcmf_commonring_read_complete(commonring);

	if (commonring->r_ptr == 0)
		goto again;
}

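/* Called by the bus layer (e.g. from the PCIe interrupt path) to drain the
 * three D2H completion rings, then re-kick any flowring that completed tx
 * status and still has packets queued.
 */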
int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	void *buf;
	u32 flowid;

	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);

	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->nrof_flowrings) {
		clear_bit(flowid, msgbuf->txstatus_done_map);
		if (brcmf_flowring_qlen(msgbuf->flow, flowid))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid);
	}

	return 0;
}

void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u8 ifidx;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}

int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
	struct brcmf_bus_msgbuf *if_msgbuf;
	struct brcmf_msgbuf *msgbuf;
	u64 address;
	u32 count;

	if_msgbuf = drvr->bus_if->msgbuf;
	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
	if (!msgbuf)
		goto fail;

	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
	if (msgbuf->txflow_wq == NULL) {
		brcmf_err("workqueue creation failed\n");
		goto fail;
	}
	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
	count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings);
	count = count * sizeof(unsigned long);
	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->flow_map)
		goto fail;

	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->txstatus_done_map)
		goto fail;

	msgbuf->drvr = drvr;
	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					     &msgbuf->ioctbuf_handle,
					     GFP_KERNEL);
	if (!msgbuf->ioctbuf)
		goto fail;
	address = (u64)msgbuf->ioctbuf_handle;
	msgbuf->ioctbuf_phys_hi = address >> 32;
	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
	drvr->proto->txdata = brcmf_msgbuf_txdata;
	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
	drvr->proto->pd = msgbuf;

	init_waitqueue_head(&msgbuf->ioctl_resp_wait);

	msgbuf->commonrings =
			(struct brcmf_commonring **)if_msgbuf->commonrings;
	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
	msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
	msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
		sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
	if (!msgbuf->flowring_dma_handle)
		goto fail;

	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
						     DMA_TO_DEVICE);
	if (!msgbuf->tx_pktids)
		goto fail;
	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
						     DMA_FROM_DEVICE);
	if (!msgbuf->rx_pktids)
		goto fail;

	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
					     if_msgbuf->nrof_flowrings);
	if (!msgbuf->flow)
		goto fail;


	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
		  msgbuf->max_ioctlrespbuf);

	/* Pre-post the rx buffer pool, retrying briefly in case the rings
	 * cannot take all buffers at once.
	 */
	count = 0;
	do {
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
			msleep(10);
		else
			break;
		count++;
	} while (count < 10);
	brcmf_msgbuf_rxbuf_event_post(msgbuf);
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
	spin_lock_init(&msgbuf->flowring_work_lock);
	INIT_LIST_HEAD(&msgbuf->work_queue);

	return 0;

fail:
	if (msgbuf) {
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		if (msgbuf->ioctbuf)
			dma_free_coherent(drvr->bus_if->dev,
					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					  msgbuf->ioctbuf,
					  msgbuf->ioctbuf_handle);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);
		kfree(msgbuf);
	}
	return -ENOMEM;
}

void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *work;

	brcmf_dbg(TRACE, "Enter\n");
	if (drvr->proto->pd) {
		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
		cancel_work_sync(&msgbuf->flowring_work);
		while (!list_empty(&msgbuf->work_queue)) {
			work = list_first_entry(&msgbuf->work_queue,
						struct brcmf_msgbuf_work_item,
						queue);
			list_del(&work->queue);
			kfree(work);
		}
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);

		brcmf_flowring_detach(msgbuf->flow);
		dma_free_coherent(drvr->bus_if->dev,
				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		kfree(msgbuf);
		drvr->proto->pd = NULL;
	}
}