1 /* Copyright (c) 2014 Broadcom Corporation
3 * Permission to use, copy, modify, and/or distribute this software for any
4 * purpose with or without fee is hereby granted, provided that the above
5 * copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 /*******************************************************************************
17 * Communicates with the dongle by using dcmd codes.
18 * For certain dcmd codes, the dongle interprets string data from the host.
19 ******************************************************************************/
21 #include <linux/types.h>
22 #include <linux/netdevice.h>
24 #include <brcmu_utils.h>
25 #include <brcmu_wifi.h>
31 #include "commonring.h"
34 #include "tracepoint.h"
/* Timeout (ms) the host waits for the dongle to answer an ioctl request. */
#define MSGBUF_IOCTL_RESP_TIMEOUT		2000

/* Message types exchanged with the dongle over the msgbuf rings. */
#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

/* Sizes of the tx/rx packet-id allocator arrays. */
#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

/* Reserved request id used to tag the single host ioctl buffer. */
#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

/* Batch sizes for flushing queued tx work to the ring. */
#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

/* Thresholds (outstanding tx) controlling when the txflow worker runs. */
#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	64
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
79 struct msgbuf_common_hdr
{
87 struct msgbuf_buf_addr
{
92 struct msgbuf_ioctl_req_hdr
{
93 struct msgbuf_common_hdr msg
;
97 __le16 output_buf_len
;
99 struct msgbuf_buf_addr req_buf_addr
;
103 struct msgbuf_tx_msghdr
{
104 struct msgbuf_common_hdr msg
;
108 struct msgbuf_buf_addr metadata_buf_addr
;
109 struct msgbuf_buf_addr data_buf_addr
;
110 __le16 metadata_buf_len
;
115 struct msgbuf_rx_bufpost
{
116 struct msgbuf_common_hdr msg
;
117 __le16 metadata_buf_len
;
120 struct msgbuf_buf_addr metadata_buf_addr
;
121 struct msgbuf_buf_addr data_buf_addr
;
124 struct msgbuf_rx_ioctl_resp_or_event
{
125 struct msgbuf_common_hdr msg
;
128 struct msgbuf_buf_addr host_buf_addr
;
132 struct msgbuf_completion_hdr
{
137 struct msgbuf_rx_event
{
138 struct msgbuf_common_hdr msg
;
139 struct msgbuf_completion_hdr compl_hdr
;
140 __le16 event_data_len
;
145 struct msgbuf_ioctl_resp_hdr
{
146 struct msgbuf_common_hdr msg
;
147 struct msgbuf_completion_hdr compl_hdr
;
154 struct msgbuf_tx_status
{
155 struct msgbuf_common_hdr msg
;
156 struct msgbuf_completion_hdr compl_hdr
;
161 struct msgbuf_rx_complete
{
162 struct msgbuf_common_hdr msg
;
163 struct msgbuf_completion_hdr compl_hdr
;
173 struct msgbuf_tx_flowring_create_req
{
174 struct msgbuf_common_hdr msg
;
185 struct msgbuf_buf_addr flow_ring_addr
;
188 struct msgbuf_tx_flowring_delete_req
{
189 struct msgbuf_common_hdr msg
;
195 struct msgbuf_flowring_create_resp
{
196 struct msgbuf_common_hdr msg
;
197 struct msgbuf_completion_hdr compl_hdr
;
201 struct msgbuf_flowring_delete_resp
{
202 struct msgbuf_common_hdr msg
;
203 struct msgbuf_completion_hdr compl_hdr
;
207 struct msgbuf_flowring_flush_resp
{
208 struct msgbuf_common_hdr msg
;
209 struct msgbuf_completion_hdr compl_hdr
;
213 struct brcmf_msgbuf_work_item
{
214 struct list_head queue
;
221 struct brcmf_msgbuf
{
222 struct brcmf_pub
*drvr
;
224 struct brcmf_commonring
**commonrings
;
225 struct brcmf_commonring
**flowrings
;
226 dma_addr_t
*flowring_dma_handle
;
231 u16 rx_metadata_offset
;
234 u32 max_ioctlrespbuf
;
235 u32 cur_ioctlrespbuf
;
240 dma_addr_t ioctbuf_handle
;
243 int ioctl_resp_status
;
244 u32 ioctl_resp_ret_len
;
245 u32 ioctl_resp_pktid
;
250 wait_queue_head_t ioctl_resp_wait
;
253 struct brcmf_msgbuf_pktids
*tx_pktids
;
254 struct brcmf_msgbuf_pktids
*rx_pktids
;
255 struct brcmf_flowring
*flow
;
257 struct workqueue_struct
*txflow_wq
;
258 struct work_struct txflow_work
;
259 unsigned long *flow_map
;
260 unsigned long *txstatus_done_map
;
262 struct work_struct flowring_work
;
263 spinlock_t flowring_work_lock
;
264 struct list_head work_queue
;
267 struct brcmf_msgbuf_pktid
{
274 struct brcmf_msgbuf_pktids
{
276 u32 last_allocated_idx
;
277 enum dma_data_direction direction
;
278 struct brcmf_msgbuf_pktid
*array
;
282 /* dma flushing needs implementation for mips and arm platforms. Should
283 * be put in util. Note, this is not real flushing. It is virtual non
284 * cached memory. Only write buffers should have to be drained. Though
285 * this may be different depending on platform......
287 #define brcmf_dma_flush(addr, len)
288 #define brcmf_dma_invalidate_cache(addr, len)
291 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf
*msgbuf
);
294 static struct brcmf_msgbuf_pktids
*
295 brcmf_msgbuf_init_pktids(u32 nr_array_entries
,
296 enum dma_data_direction direction
)
298 struct brcmf_msgbuf_pktid
*array
;
299 struct brcmf_msgbuf_pktids
*pktids
;
301 array
= kcalloc(nr_array_entries
, sizeof(*array
), GFP_KERNEL
);
305 pktids
= kzalloc(sizeof(*pktids
), GFP_KERNEL
);
310 pktids
->array
= array
;
311 pktids
->array_size
= nr_array_entries
;
318 brcmf_msgbuf_alloc_pktid(struct device
*dev
,
319 struct brcmf_msgbuf_pktids
*pktids
,
320 struct sk_buff
*skb
, u16 data_offset
,
321 dma_addr_t
*physaddr
, u32
*idx
)
323 struct brcmf_msgbuf_pktid
*array
;
326 array
= pktids
->array
;
328 *physaddr
= dma_map_single(dev
, skb
->data
+ data_offset
,
329 skb
->len
- data_offset
, pktids
->direction
);
331 if (dma_mapping_error(dev
, *physaddr
)) {
332 brcmf_err("dma_map_single failed !!\n");
336 *idx
= pktids
->last_allocated_idx
;
341 if (*idx
== pktids
->array_size
)
343 if (array
[*idx
].allocated
.counter
== 0)
344 if (atomic_cmpxchg(&array
[*idx
].allocated
, 0, 1) == 0)
347 } while (count
< pktids
->array_size
);
349 if (count
== pktids
->array_size
)
352 array
[*idx
].data_offset
= data_offset
;
353 array
[*idx
].physaddr
= *physaddr
;
354 array
[*idx
].skb
= skb
;
356 pktids
->last_allocated_idx
= *idx
;
362 static struct sk_buff
*
363 brcmf_msgbuf_get_pktid(struct device
*dev
, struct brcmf_msgbuf_pktids
*pktids
,
366 struct brcmf_msgbuf_pktid
*pktid
;
369 if (idx
>= pktids
->array_size
) {
370 brcmf_err("Invalid packet id %d (max %d)\n", idx
,
374 if (pktids
->array
[idx
].allocated
.counter
) {
375 pktid
= &pktids
->array
[idx
];
376 dma_unmap_single(dev
, pktid
->physaddr
,
377 pktid
->skb
->len
- pktid
->data_offset
,
380 pktid
->allocated
.counter
= 0;
383 brcmf_err("Invalid packet id %d (not in use)\n", idx
);
391 brcmf_msgbuf_release_array(struct device
*dev
,
392 struct brcmf_msgbuf_pktids
*pktids
)
394 struct brcmf_msgbuf_pktid
*array
;
395 struct brcmf_msgbuf_pktid
*pktid
;
398 array
= pktids
->array
;
401 if (array
[count
].allocated
.counter
) {
402 pktid
= &array
[count
];
403 dma_unmap_single(dev
, pktid
->physaddr
,
404 pktid
->skb
->len
- pktid
->data_offset
,
406 brcmu_pkt_buf_free_skb(pktid
->skb
);
409 } while (count
< pktids
->array_size
);
416 static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf
*msgbuf
)
418 if (msgbuf
->rx_pktids
)
419 brcmf_msgbuf_release_array(msgbuf
->drvr
->bus_if
->dev
,
421 if (msgbuf
->tx_pktids
)
422 brcmf_msgbuf_release_array(msgbuf
->drvr
->bus_if
->dev
,
427 static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub
*drvr
, int ifidx
,
428 uint cmd
, void *buf
, uint len
)
430 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
431 struct brcmf_commonring
*commonring
;
432 struct msgbuf_ioctl_req_hdr
*request
;
437 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
438 brcmf_commonring_lock(commonring
);
439 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
441 brcmf_err("Failed to reserve space in commonring\n");
442 brcmf_commonring_unlock(commonring
);
448 request
= (struct msgbuf_ioctl_req_hdr
*)ret_ptr
;
449 request
->msg
.msgtype
= MSGBUF_TYPE_IOCTLPTR_REQ
;
450 request
->msg
.ifidx
= (u8
)ifidx
;
451 request
->msg
.flags
= 0;
452 request
->msg
.request_id
= cpu_to_le32(BRCMF_IOCTL_REQ_PKTID
);
453 request
->cmd
= cpu_to_le32(cmd
);
454 request
->output_buf_len
= cpu_to_le16(len
);
455 request
->trans_id
= cpu_to_le16(msgbuf
->reqid
);
457 buf_len
= min_t(u16
, len
, BRCMF_TX_IOCTL_MAX_MSG_SIZE
);
458 request
->input_buf_len
= cpu_to_le16(buf_len
);
459 request
->req_buf_addr
.high_addr
= cpu_to_le32(msgbuf
->ioctbuf_phys_hi
);
460 request
->req_buf_addr
.low_addr
= cpu_to_le32(msgbuf
->ioctbuf_phys_lo
);
462 memcpy(msgbuf
->ioctbuf
, buf
, buf_len
);
464 memset(msgbuf
->ioctbuf
, 0, buf_len
);
465 brcmf_dma_flush(ioctl_buf
, buf_len
);
467 err
= brcmf_commonring_write_complete(commonring
);
468 brcmf_commonring_unlock(commonring
);
474 static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf
*msgbuf
)
476 return wait_event_timeout(msgbuf
->ioctl_resp_wait
,
477 msgbuf
->ctl_completed
,
478 msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT
));
482 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf
*msgbuf
)
484 msgbuf
->ctl_completed
= true;
485 if (waitqueue_active(&msgbuf
->ioctl_resp_wait
))
486 wake_up(&msgbuf
->ioctl_resp_wait
);
490 static int brcmf_msgbuf_query_dcmd(struct brcmf_pub
*drvr
, int ifidx
,
491 uint cmd
, void *buf
, uint len
)
493 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
494 struct sk_buff
*skb
= NULL
;
498 brcmf_dbg(MSGBUF
, "ifidx=%d, cmd=%d, len=%d\n", ifidx
, cmd
, len
);
499 msgbuf
->ctl_completed
= false;
500 err
= brcmf_msgbuf_tx_ioctl(drvr
, ifidx
, cmd
, buf
, len
);
504 timeout
= brcmf_msgbuf_ioctl_resp_wait(msgbuf
);
506 brcmf_err("Timeout on response for query command\n");
510 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
512 msgbuf
->ioctl_resp_pktid
);
513 if (msgbuf
->ioctl_resp_ret_len
!= 0) {
517 memcpy(buf
, skb
->data
, (len
< msgbuf
->ioctl_resp_ret_len
) ?
518 len
: msgbuf
->ioctl_resp_ret_len
);
520 brcmu_pkt_buf_free_skb(skb
);
522 return msgbuf
->ioctl_resp_status
;
526 static int brcmf_msgbuf_set_dcmd(struct brcmf_pub
*drvr
, int ifidx
,
527 uint cmd
, void *buf
, uint len
)
529 return brcmf_msgbuf_query_dcmd(drvr
, ifidx
, cmd
, buf
, len
);
533 static int brcmf_msgbuf_hdrpull(struct brcmf_pub
*drvr
, bool do_fws
,
534 u8
*ifidx
, struct sk_buff
*skb
)
541 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf
*msgbuf
, u16 flowid
)
546 brcmf_dbg(MSGBUF
, "Removing flowring %d\n", flowid
);
548 dma_sz
= BRCMF_H2D_TXFLOWRING_MAX_ITEM
* BRCMF_H2D_TXFLOWRING_ITEMSIZE
;
549 dma_buf
= msgbuf
->flowrings
[flowid
]->buf_addr
;
550 dma_free_coherent(msgbuf
->drvr
->bus_if
->dev
, dma_sz
, dma_buf
,
551 msgbuf
->flowring_dma_handle
[flowid
]);
553 brcmf_flowring_delete(msgbuf
->flow
, flowid
);
557 static struct brcmf_msgbuf_work_item
*
558 brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf
*msgbuf
)
560 struct brcmf_msgbuf_work_item
*work
= NULL
;
563 spin_lock_irqsave(&msgbuf
->flowring_work_lock
, flags
);
564 if (!list_empty(&msgbuf
->work_queue
)) {
565 work
= list_first_entry(&msgbuf
->work_queue
,
566 struct brcmf_msgbuf_work_item
, queue
);
567 list_del(&work
->queue
);
569 spin_unlock_irqrestore(&msgbuf
->flowring_work_lock
, flags
);
576 brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf
*msgbuf
,
577 struct brcmf_msgbuf_work_item
*work
)
579 struct msgbuf_tx_flowring_create_req
*create
;
580 struct brcmf_commonring
*commonring
;
588 flowid
= work
->flowid
;
589 dma_sz
= BRCMF_H2D_TXFLOWRING_MAX_ITEM
* BRCMF_H2D_TXFLOWRING_ITEMSIZE
;
590 dma_buf
= dma_alloc_coherent(msgbuf
->drvr
->bus_if
->dev
, dma_sz
,
591 &msgbuf
->flowring_dma_handle
[flowid
],
594 brcmf_err("dma_alloc_coherent failed\n");
595 brcmf_flowring_delete(msgbuf
->flow
, flowid
);
596 return BRCMF_FLOWRING_INVALID_ID
;
599 brcmf_commonring_config(msgbuf
->flowrings
[flowid
],
600 BRCMF_H2D_TXFLOWRING_MAX_ITEM
,
601 BRCMF_H2D_TXFLOWRING_ITEMSIZE
, dma_buf
);
603 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
604 brcmf_commonring_lock(commonring
);
605 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
607 brcmf_err("Failed to reserve space in commonring\n");
608 brcmf_commonring_unlock(commonring
);
609 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
610 return BRCMF_FLOWRING_INVALID_ID
;
613 create
= (struct msgbuf_tx_flowring_create_req
*)ret_ptr
;
614 create
->msg
.msgtype
= MSGBUF_TYPE_FLOW_RING_CREATE
;
615 create
->msg
.ifidx
= work
->ifidx
;
616 create
->msg
.request_id
= 0;
617 create
->tid
= brcmf_flowring_tid(msgbuf
->flow
, flowid
);
618 create
->flow_ring_id
= cpu_to_le16(flowid
+
619 BRCMF_NROF_H2D_COMMON_MSGRINGS
);
620 memcpy(create
->sa
, work
->sa
, ETH_ALEN
);
621 memcpy(create
->da
, work
->da
, ETH_ALEN
);
622 address
= (u64
)msgbuf
->flowring_dma_handle
[flowid
];
623 create
->flow_ring_addr
.high_addr
= cpu_to_le32(address
>> 32);
624 create
->flow_ring_addr
.low_addr
= cpu_to_le32(address
& 0xffffffff);
625 create
->max_items
= cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM
);
626 create
->len_item
= cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE
);
628 brcmf_dbg(MSGBUF
, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
629 flowid
, work
->da
, create
->tid
, work
->ifidx
);
631 err
= brcmf_commonring_write_complete(commonring
);
632 brcmf_commonring_unlock(commonring
);
634 brcmf_err("Failed to write commonring\n");
635 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
636 return BRCMF_FLOWRING_INVALID_ID
;
643 static void brcmf_msgbuf_flowring_worker(struct work_struct
*work
)
645 struct brcmf_msgbuf
*msgbuf
;
646 struct brcmf_msgbuf_work_item
*create
;
648 msgbuf
= container_of(work
, struct brcmf_msgbuf
, flowring_work
);
650 while ((create
= brcmf_msgbuf_dequeue_work(msgbuf
))) {
651 brcmf_msgbuf_flowring_create_worker(msgbuf
, create
);
657 static u32
brcmf_msgbuf_flowring_create(struct brcmf_msgbuf
*msgbuf
, int ifidx
,
660 struct brcmf_msgbuf_work_item
*create
;
661 struct ethhdr
*eh
= (struct ethhdr
*)(skb
->data
);
665 create
= kzalloc(sizeof(*create
), GFP_ATOMIC
);
667 return BRCMF_FLOWRING_INVALID_ID
;
669 flowid
= brcmf_flowring_create(msgbuf
->flow
, eh
->h_dest
,
670 skb
->priority
, ifidx
);
671 if (flowid
== BRCMF_FLOWRING_INVALID_ID
) {
676 create
->flowid
= flowid
;
677 create
->ifidx
= ifidx
;
678 memcpy(create
->sa
, eh
->h_source
, ETH_ALEN
);
679 memcpy(create
->da
, eh
->h_dest
, ETH_ALEN
);
681 spin_lock_irqsave(&msgbuf
->flowring_work_lock
, flags
);
682 list_add_tail(&create
->queue
, &msgbuf
->work_queue
);
683 spin_unlock_irqrestore(&msgbuf
->flowring_work_lock
, flags
);
684 schedule_work(&msgbuf
->flowring_work
);
690 static void brcmf_msgbuf_txflow(struct brcmf_msgbuf
*msgbuf
, u8 flowid
)
692 struct brcmf_flowring
*flow
= msgbuf
->flow
;
693 struct brcmf_commonring
*commonring
;
699 struct msgbuf_tx_msghdr
*tx_msghdr
;
702 commonring
= msgbuf
->flowrings
[flowid
];
703 if (!brcmf_commonring_write_available(commonring
))
706 brcmf_commonring_lock(commonring
);
708 count
= BRCMF_MSGBUF_TX_FLUSH_CNT2
- BRCMF_MSGBUF_TX_FLUSH_CNT1
;
709 while (brcmf_flowring_qlen(flow
, flowid
)) {
710 skb
= brcmf_flowring_dequeue(flow
, flowid
);
712 brcmf_err("No SKB, but qlen %d\n",
713 brcmf_flowring_qlen(flow
, flowid
));
717 if (brcmf_msgbuf_alloc_pktid(msgbuf
->drvr
->bus_if
->dev
,
718 msgbuf
->tx_pktids
, skb
, ETH_HLEN
,
719 &physaddr
, &pktid
)) {
720 brcmf_flowring_reinsert(flow
, flowid
, skb
);
721 brcmf_err("No PKTID available !!\n");
724 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
726 brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
727 msgbuf
->tx_pktids
, pktid
);
728 brcmf_flowring_reinsert(flow
, flowid
, skb
);
733 tx_msghdr
= (struct msgbuf_tx_msghdr
*)ret_ptr
;
735 tx_msghdr
->msg
.msgtype
= MSGBUF_TYPE_TX_POST
;
736 tx_msghdr
->msg
.request_id
= cpu_to_le32(pktid
);
737 tx_msghdr
->msg
.ifidx
= brcmf_flowring_ifidx_get(flow
, flowid
);
738 tx_msghdr
->flags
= BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3
;
739 tx_msghdr
->flags
|= (skb
->priority
& 0x07) <<
740 BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT
;
741 tx_msghdr
->seg_cnt
= 1;
742 memcpy(tx_msghdr
->txhdr
, skb
->data
, ETH_HLEN
);
743 tx_msghdr
->data_len
= cpu_to_le16(skb
->len
- ETH_HLEN
);
744 address
= (u64
)physaddr
;
745 tx_msghdr
->data_buf_addr
.high_addr
= cpu_to_le32(address
>> 32);
746 tx_msghdr
->data_buf_addr
.low_addr
=
747 cpu_to_le32(address
& 0xffffffff);
748 tx_msghdr
->metadata_buf_len
= 0;
749 tx_msghdr
->metadata_buf_addr
.high_addr
= 0;
750 tx_msghdr
->metadata_buf_addr
.low_addr
= 0;
751 atomic_inc(&commonring
->outstanding_tx
);
752 if (count
>= BRCMF_MSGBUF_TX_FLUSH_CNT2
) {
753 brcmf_commonring_write_complete(commonring
);
758 brcmf_commonring_write_complete(commonring
);
759 brcmf_commonring_unlock(commonring
);
763 static void brcmf_msgbuf_txflow_worker(struct work_struct
*worker
)
765 struct brcmf_msgbuf
*msgbuf
;
768 msgbuf
= container_of(worker
, struct brcmf_msgbuf
, txflow_work
);
769 for_each_set_bit(flowid
, msgbuf
->flow_map
, msgbuf
->nrof_flowrings
) {
770 clear_bit(flowid
, msgbuf
->flow_map
);
771 brcmf_msgbuf_txflow(msgbuf
, flowid
);
776 static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf
*msgbuf
, u32 flowid
,
779 struct brcmf_commonring
*commonring
;
781 set_bit(flowid
, msgbuf
->flow_map
);
782 commonring
= msgbuf
->flowrings
[flowid
];
783 if ((force
) || (atomic_read(&commonring
->outstanding_tx
) <
784 BRCMF_MSGBUF_DELAY_TXWORKER_THRS
))
785 queue_work(msgbuf
->txflow_wq
, &msgbuf
->txflow_work
);
791 static int brcmf_msgbuf_txdata(struct brcmf_pub
*drvr
, int ifidx
,
792 u8 offset
, struct sk_buff
*skb
)
794 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
795 struct brcmf_flowring
*flow
= msgbuf
->flow
;
796 struct ethhdr
*eh
= (struct ethhdr
*)(skb
->data
);
799 flowid
= brcmf_flowring_lookup(flow
, eh
->h_dest
, skb
->priority
, ifidx
);
800 if (flowid
== BRCMF_FLOWRING_INVALID_ID
) {
801 flowid
= brcmf_msgbuf_flowring_create(msgbuf
, ifidx
, skb
);
802 if (flowid
== BRCMF_FLOWRING_INVALID_ID
)
805 brcmf_flowring_enqueue(flow
, flowid
, skb
);
806 brcmf_msgbuf_schedule_txdata(msgbuf
, flowid
, false);
813 brcmf_msgbuf_configure_addr_mode(struct brcmf_pub
*drvr
, int ifidx
,
814 enum proto_addr_mode addr_mode
)
816 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
818 brcmf_flowring_configure_addr_mode(msgbuf
->flow
, ifidx
, addr_mode
);
823 brcmf_msgbuf_delete_peer(struct brcmf_pub
*drvr
, int ifidx
, u8 peer
[ETH_ALEN
])
825 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
827 brcmf_flowring_delete_peer(msgbuf
->flow
, ifidx
, peer
);
832 brcmf_msgbuf_add_tdls_peer(struct brcmf_pub
*drvr
, int ifidx
, u8 peer
[ETH_ALEN
])
834 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
836 brcmf_flowring_add_tdls_peer(msgbuf
->flow
, ifidx
, peer
);
841 brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf
*msgbuf
, void *buf
)
843 struct msgbuf_ioctl_resp_hdr
*ioctl_resp
;
845 ioctl_resp
= (struct msgbuf_ioctl_resp_hdr
*)buf
;
847 msgbuf
->ioctl_resp_status
=
848 (s16
)le16_to_cpu(ioctl_resp
->compl_hdr
.status
);
849 msgbuf
->ioctl_resp_ret_len
= le16_to_cpu(ioctl_resp
->resp_len
);
850 msgbuf
->ioctl_resp_pktid
= le32_to_cpu(ioctl_resp
->msg
.request_id
);
852 brcmf_msgbuf_ioctl_resp_wake(msgbuf
);
854 if (msgbuf
->cur_ioctlrespbuf
)
855 msgbuf
->cur_ioctlrespbuf
--;
856 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf
);
861 brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf
*msgbuf
, void *buf
)
863 struct brcmf_commonring
*commonring
;
864 struct msgbuf_tx_status
*tx_status
;
869 tx_status
= (struct msgbuf_tx_status
*)buf
;
870 idx
= le32_to_cpu(tx_status
->msg
.request_id
);
871 flowid
= le16_to_cpu(tx_status
->compl_hdr
.flow_ring_id
);
872 flowid
-= BRCMF_NROF_H2D_COMMON_MSGRINGS
;
873 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
874 msgbuf
->tx_pktids
, idx
);
878 set_bit(flowid
, msgbuf
->txstatus_done_map
);
879 commonring
= msgbuf
->flowrings
[flowid
];
880 atomic_dec(&commonring
->outstanding_tx
);
882 brcmf_txfinalize(msgbuf
->drvr
, skb
, tx_status
->msg
.ifidx
, true);
886 static u32
brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf
*msgbuf
, u32 count
)
888 struct brcmf_commonring
*commonring
;
894 struct msgbuf_rx_bufpost
*rx_bufpost
;
899 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_RXPOST_SUBMIT
];
900 ret_ptr
= brcmf_commonring_reserve_for_write_multiple(commonring
,
904 brcmf_dbg(MSGBUF
, "Failed to reserve space in commonring\n");
908 for (i
= 0; i
< alloced
; i
++) {
909 rx_bufpost
= (struct msgbuf_rx_bufpost
*)ret_ptr
;
910 memset(rx_bufpost
, 0, sizeof(*rx_bufpost
));
912 skb
= brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE
);
915 brcmf_err("Failed to alloc SKB\n");
916 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
921 if (brcmf_msgbuf_alloc_pktid(msgbuf
->drvr
->bus_if
->dev
,
922 msgbuf
->rx_pktids
, skb
, 0,
923 &physaddr
, &pktid
)) {
924 dev_kfree_skb_any(skb
);
925 brcmf_err("No PKTID available !!\n");
926 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
930 if (msgbuf
->rx_metadata_offset
) {
931 address
= (u64
)physaddr
;
932 rx_bufpost
->metadata_buf_len
=
933 cpu_to_le16(msgbuf
->rx_metadata_offset
);
934 rx_bufpost
->metadata_buf_addr
.high_addr
=
935 cpu_to_le32(address
>> 32);
936 rx_bufpost
->metadata_buf_addr
.low_addr
=
937 cpu_to_le32(address
& 0xffffffff);
939 skb_pull(skb
, msgbuf
->rx_metadata_offset
);
941 physaddr
+= msgbuf
->rx_metadata_offset
;
943 rx_bufpost
->msg
.msgtype
= MSGBUF_TYPE_RXBUF_POST
;
944 rx_bufpost
->msg
.request_id
= cpu_to_le32(pktid
);
946 address
= (u64
)physaddr
;
947 rx_bufpost
->data_buf_len
= cpu_to_le16((u16
)pktlen
);
948 rx_bufpost
->data_buf_addr
.high_addr
=
949 cpu_to_le32(address
>> 32);
950 rx_bufpost
->data_buf_addr
.low_addr
=
951 cpu_to_le32(address
& 0xffffffff);
953 ret_ptr
+= brcmf_commonring_len_item(commonring
);
957 brcmf_commonring_write_complete(commonring
);
964 brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf
*msgbuf
)
969 fillbufs
= msgbuf
->max_rxbufpost
- msgbuf
->rxbufpost
;
972 retcount
= brcmf_msgbuf_rxbuf_data_post(msgbuf
, fillbufs
);
975 msgbuf
->rxbufpost
+= retcount
;
976 fillbufs
-= retcount
;
982 brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf
*msgbuf
, u16 rxcnt
)
984 msgbuf
->rxbufpost
-= rxcnt
;
985 if (msgbuf
->rxbufpost
<= (msgbuf
->max_rxbufpost
-
986 BRCMF_MSGBUF_RXBUFPOST_THRESHOLD
))
987 brcmf_msgbuf_rxbuf_data_fill(msgbuf
);
992 brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf
*msgbuf
, bool event_buf
,
995 struct brcmf_commonring
*commonring
;
1000 dma_addr_t physaddr
;
1001 struct msgbuf_rx_ioctl_resp_or_event
*rx_bufpost
;
1006 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
1007 brcmf_commonring_lock(commonring
);
1008 ret_ptr
= brcmf_commonring_reserve_for_write_multiple(commonring
,
1012 brcmf_err("Failed to reserve space in commonring\n");
1013 brcmf_commonring_unlock(commonring
);
1017 for (i
= 0; i
< alloced
; i
++) {
1018 rx_bufpost
= (struct msgbuf_rx_ioctl_resp_or_event
*)ret_ptr
;
1019 memset(rx_bufpost
, 0, sizeof(*rx_bufpost
));
1021 skb
= brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE
);
1024 brcmf_err("Failed to alloc SKB\n");
1025 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
1030 if (brcmf_msgbuf_alloc_pktid(msgbuf
->drvr
->bus_if
->dev
,
1031 msgbuf
->rx_pktids
, skb
, 0,
1032 &physaddr
, &pktid
)) {
1033 dev_kfree_skb_any(skb
);
1034 brcmf_err("No PKTID available !!\n");
1035 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
1039 rx_bufpost
->msg
.msgtype
= MSGBUF_TYPE_EVENT_BUF_POST
;
1041 rx_bufpost
->msg
.msgtype
=
1042 MSGBUF_TYPE_IOCTLRESP_BUF_POST
;
1043 rx_bufpost
->msg
.request_id
= cpu_to_le32(pktid
);
1045 address
= (u64
)physaddr
;
1046 rx_bufpost
->host_buf_len
= cpu_to_le16((u16
)pktlen
);
1047 rx_bufpost
->host_buf_addr
.high_addr
=
1048 cpu_to_le32(address
>> 32);
1049 rx_bufpost
->host_buf_addr
.low_addr
=
1050 cpu_to_le32(address
& 0xffffffff);
1052 ret_ptr
+= brcmf_commonring_len_item(commonring
);
1056 brcmf_commonring_write_complete(commonring
);
1058 brcmf_commonring_unlock(commonring
);
1064 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf
*msgbuf
)
1068 count
= msgbuf
->max_ioctlrespbuf
- msgbuf
->cur_ioctlrespbuf
;
1069 count
= brcmf_msgbuf_rxbuf_ctrl_post(msgbuf
, false, count
);
1070 msgbuf
->cur_ioctlrespbuf
+= count
;
1074 static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf
*msgbuf
)
1078 count
= msgbuf
->max_eventbuf
- msgbuf
->cur_eventbuf
;
1079 count
= brcmf_msgbuf_rxbuf_ctrl_post(msgbuf
, true, count
);
1080 msgbuf
->cur_eventbuf
+= count
;
1085 brcmf_msgbuf_rx_skb(struct brcmf_msgbuf
*msgbuf
, struct sk_buff
*skb
,
1088 struct brcmf_if
*ifp
;
1090 /* The ifidx is the idx to map to matching netdev/ifp. When receiving
1091 * events this is easy because it contains the bssidx which maps
1092 * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
1093 * bssidx 1 is used for p2p0 and no data can be received or
1094 * transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
1098 ifp
= msgbuf
->drvr
->iflist
[ifidx
];
1099 if (!ifp
|| !ifp
->ndev
) {
1100 brcmf_err("Received pkt for invalid ifidx %d\n", ifidx
);
1101 brcmu_pkt_buf_free_skb(skb
);
1104 brcmf_netif_rx(ifp
, skb
);
1108 static void brcmf_msgbuf_process_event(struct brcmf_msgbuf
*msgbuf
, void *buf
)
1110 struct msgbuf_rx_event
*event
;
1113 struct sk_buff
*skb
;
1115 event
= (struct msgbuf_rx_event
*)buf
;
1116 idx
= le32_to_cpu(event
->msg
.request_id
);
1117 buflen
= le16_to_cpu(event
->event_data_len
);
1119 if (msgbuf
->cur_eventbuf
)
1120 msgbuf
->cur_eventbuf
--;
1121 brcmf_msgbuf_rxbuf_event_post(msgbuf
);
1123 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
1124 msgbuf
->rx_pktids
, idx
);
1128 if (msgbuf
->rx_dataoffset
)
1129 skb_pull(skb
, msgbuf
->rx_dataoffset
);
1131 skb_trim(skb
, buflen
);
1133 brcmf_msgbuf_rx_skb(msgbuf
, skb
, event
->msg
.ifidx
);
1138 brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf
*msgbuf
, void *buf
)
1140 struct msgbuf_rx_complete
*rx_complete
;
1141 struct sk_buff
*skb
;
1146 brcmf_msgbuf_update_rxbufpost_count(msgbuf
, 1);
1148 rx_complete
= (struct msgbuf_rx_complete
*)buf
;
1149 data_offset
= le16_to_cpu(rx_complete
->data_offset
);
1150 buflen
= le16_to_cpu(rx_complete
->data_len
);
1151 idx
= le32_to_cpu(rx_complete
->msg
.request_id
);
1153 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
1154 msgbuf
->rx_pktids
, idx
);
1159 skb_pull(skb
, data_offset
);
1160 else if (msgbuf
->rx_dataoffset
)
1161 skb_pull(skb
, msgbuf
->rx_dataoffset
);
1163 skb_trim(skb
, buflen
);
1165 brcmf_msgbuf_rx_skb(msgbuf
, skb
, rx_complete
->msg
.ifidx
);
1170 brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf
*msgbuf
,
1173 struct msgbuf_flowring_create_resp
*flowring_create_resp
;
1177 flowring_create_resp
= (struct msgbuf_flowring_create_resp
*)buf
;
1179 flowid
= le16_to_cpu(flowring_create_resp
->compl_hdr
.flow_ring_id
);
1180 flowid
-= BRCMF_NROF_H2D_COMMON_MSGRINGS
;
1181 status
= le16_to_cpu(flowring_create_resp
->compl_hdr
.status
);
1184 brcmf_err("Flowring creation failed, code %d\n", status
);
1185 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1188 brcmf_dbg(MSGBUF
, "Flowring %d Create response status %d\n", flowid
,
1191 brcmf_flowring_open(msgbuf
->flow
, flowid
);
1193 brcmf_msgbuf_schedule_txdata(msgbuf
, flowid
, true);
1198 brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf
*msgbuf
,
1201 struct msgbuf_flowring_delete_resp
*flowring_delete_resp
;
1205 flowring_delete_resp
= (struct msgbuf_flowring_delete_resp
*)buf
;
1207 flowid
= le16_to_cpu(flowring_delete_resp
->compl_hdr
.flow_ring_id
);
1208 flowid
-= BRCMF_NROF_H2D_COMMON_MSGRINGS
;
1209 status
= le16_to_cpu(flowring_delete_resp
->compl_hdr
.status
);
1212 brcmf_err("Flowring deletion failed, code %d\n", status
);
1213 brcmf_flowring_delete(msgbuf
->flow
, flowid
);
1216 brcmf_dbg(MSGBUF
, "Flowring %d Delete response status %d\n", flowid
,
1219 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1223 static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf
*msgbuf
, void *buf
)
1225 struct msgbuf_common_hdr
*msg
;
1227 msg
= (struct msgbuf_common_hdr
*)buf
;
1228 switch (msg
->msgtype
) {
1229 case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT
:
1230 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
1231 brcmf_msgbuf_process_flow_ring_create_response(msgbuf
, buf
);
1233 case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT
:
1234 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
1235 brcmf_msgbuf_process_flow_ring_delete_response(msgbuf
, buf
);
1237 case MSGBUF_TYPE_IOCTLPTR_REQ_ACK
:
1238 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
1240 case MSGBUF_TYPE_IOCTL_CMPLT
:
1241 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_IOCTL_CMPLT\n");
1242 brcmf_msgbuf_process_ioctl_complete(msgbuf
, buf
);
1244 case MSGBUF_TYPE_WL_EVENT
:
1245 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_WL_EVENT\n");
1246 brcmf_msgbuf_process_event(msgbuf
, buf
);
1248 case MSGBUF_TYPE_TX_STATUS
:
1249 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_TX_STATUS\n");
1250 brcmf_msgbuf_process_txstatus(msgbuf
, buf
);
1252 case MSGBUF_TYPE_RX_CMPLT
:
1253 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_RX_CMPLT\n");
1254 brcmf_msgbuf_process_rx_complete(msgbuf
, buf
);
1257 brcmf_err("Unsupported msgtype %d\n", msg
->msgtype
);
1263 static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf
*msgbuf
,
1264 struct brcmf_commonring
*commonring
)
1270 buf
= brcmf_commonring_get_read_ptr(commonring
, &count
);
1275 brcmf_msgbuf_process_msgtype(msgbuf
,
1276 buf
+ msgbuf
->rx_dataoffset
);
1277 buf
+= brcmf_commonring_len_item(commonring
);
1280 brcmf_commonring_read_complete(commonring
);
1282 if (commonring
->r_ptr
== 0)
1287 int brcmf_proto_msgbuf_rx_trigger(struct device
*dev
)
1289 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1290 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1291 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
1292 struct brcmf_commonring
*commonring
;
1297 buf
= msgbuf
->commonrings
[BRCMF_D2H_MSGRING_RX_COMPLETE
];
1298 brcmf_msgbuf_process_rx(msgbuf
, buf
);
1299 buf
= msgbuf
->commonrings
[BRCMF_D2H_MSGRING_TX_COMPLETE
];
1300 brcmf_msgbuf_process_rx(msgbuf
, buf
);
1301 buf
= msgbuf
->commonrings
[BRCMF_D2H_MSGRING_CONTROL_COMPLETE
];
1302 brcmf_msgbuf_process_rx(msgbuf
, buf
);
1304 for_each_set_bit(flowid
, msgbuf
->txstatus_done_map
,
1305 msgbuf
->nrof_flowrings
) {
1306 clear_bit(flowid
, msgbuf
->txstatus_done_map
);
1307 commonring
= msgbuf
->flowrings
[flowid
];
1308 qlen
= brcmf_flowring_qlen(msgbuf
->flow
, flowid
);
1309 if ((qlen
> BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS
) ||
1310 ((qlen
) && (atomic_read(&commonring
->outstanding_tx
) <
1311 BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS
)))
1312 brcmf_msgbuf_schedule_txdata(msgbuf
, flowid
, true);
1319 void brcmf_msgbuf_delete_flowring(struct brcmf_pub
*drvr
, u8 flowid
)
1321 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
1322 struct msgbuf_tx_flowring_delete_req
*delete;
1323 struct brcmf_commonring
*commonring
;
1328 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
1329 brcmf_commonring_lock(commonring
);
1330 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
1332 brcmf_err("FW unaware, flowring will be removed !!\n");
1333 brcmf_commonring_unlock(commonring
);
1334 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1338 delete = (struct msgbuf_tx_flowring_delete_req
*)ret_ptr
;
1340 ifidx
= brcmf_flowring_ifidx_get(msgbuf
->flow
, flowid
);
1342 delete->msg
.msgtype
= MSGBUF_TYPE_FLOW_RING_DELETE
;
1343 delete->msg
.ifidx
= ifidx
;
1344 delete->msg
.request_id
= 0;
1346 delete->flow_ring_id
= cpu_to_le16(flowid
+
1347 BRCMF_NROF_H2D_COMMON_MSGRINGS
);
1350 brcmf_dbg(MSGBUF
, "Send Flow Delete Req flow ID %d, ifindex %d\n",
1353 err
= brcmf_commonring_write_complete(commonring
);
1354 brcmf_commonring_unlock(commonring
);
1356 brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
1357 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1362 int brcmf_proto_msgbuf_attach(struct brcmf_pub
*drvr
)
1364 struct brcmf_bus_msgbuf
*if_msgbuf
;
1365 struct brcmf_msgbuf
*msgbuf
;
1369 if_msgbuf
= drvr
->bus_if
->msgbuf
;
1370 msgbuf
= kzalloc(sizeof(*msgbuf
), GFP_KERNEL
);
1374 msgbuf
->txflow_wq
= create_singlethread_workqueue("msgbuf_txflow");
1375 if (msgbuf
->txflow_wq
== NULL
) {
1376 brcmf_err("workqueue creation failed\n");
1379 INIT_WORK(&msgbuf
->txflow_work
, brcmf_msgbuf_txflow_worker
);
1380 count
= BITS_TO_LONGS(if_msgbuf
->nrof_flowrings
);
1381 count
= count
* sizeof(unsigned long);
1382 msgbuf
->flow_map
= kzalloc(count
, GFP_KERNEL
);
1383 if (!msgbuf
->flow_map
)
1386 msgbuf
->txstatus_done_map
= kzalloc(count
, GFP_KERNEL
);
1387 if (!msgbuf
->txstatus_done_map
)
1390 msgbuf
->drvr
= drvr
;
1391 msgbuf
->ioctbuf
= dma_alloc_coherent(drvr
->bus_if
->dev
,
1392 BRCMF_TX_IOCTL_MAX_MSG_SIZE
,
1393 &msgbuf
->ioctbuf_handle
,
1395 if (!msgbuf
->ioctbuf
)
1397 address
= (u64
)msgbuf
->ioctbuf_handle
;
1398 msgbuf
->ioctbuf_phys_hi
= address
>> 32;
1399 msgbuf
->ioctbuf_phys_lo
= address
& 0xffffffff;
1401 drvr
->proto
->hdrpull
= brcmf_msgbuf_hdrpull
;
1402 drvr
->proto
->query_dcmd
= brcmf_msgbuf_query_dcmd
;
1403 drvr
->proto
->set_dcmd
= brcmf_msgbuf_set_dcmd
;
1404 drvr
->proto
->txdata
= brcmf_msgbuf_txdata
;
1405 drvr
->proto
->configure_addr_mode
= brcmf_msgbuf_configure_addr_mode
;
1406 drvr
->proto
->delete_peer
= brcmf_msgbuf_delete_peer
;
1407 drvr
->proto
->add_tdls_peer
= brcmf_msgbuf_add_tdls_peer
;
1408 drvr
->proto
->pd
= msgbuf
;
1410 init_waitqueue_head(&msgbuf
->ioctl_resp_wait
);
1412 msgbuf
->commonrings
=
1413 (struct brcmf_commonring
**)if_msgbuf
->commonrings
;
1414 msgbuf
->flowrings
= (struct brcmf_commonring
**)if_msgbuf
->flowrings
;
1415 msgbuf
->nrof_flowrings
= if_msgbuf
->nrof_flowrings
;
1416 msgbuf
->flowring_dma_handle
= kzalloc(msgbuf
->nrof_flowrings
*
1417 sizeof(*msgbuf
->flowring_dma_handle
), GFP_KERNEL
);
1418 if (!msgbuf
->flowring_dma_handle
)
1421 msgbuf
->rx_dataoffset
= if_msgbuf
->rx_dataoffset
;
1422 msgbuf
->max_rxbufpost
= if_msgbuf
->max_rxbufpost
;
1424 msgbuf
->max_ioctlrespbuf
= BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST
;
1425 msgbuf
->max_eventbuf
= BRCMF_MSGBUF_MAX_EVENTBUF_POST
;
1427 msgbuf
->tx_pktids
= brcmf_msgbuf_init_pktids(NR_TX_PKTIDS
,
1429 if (!msgbuf
->tx_pktids
)
1431 msgbuf
->rx_pktids
= brcmf_msgbuf_init_pktids(NR_RX_PKTIDS
,
1433 if (!msgbuf
->rx_pktids
)
1436 msgbuf
->flow
= brcmf_flowring_attach(drvr
->bus_if
->dev
,
1437 if_msgbuf
->nrof_flowrings
);
1442 brcmf_dbg(MSGBUF
, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
1443 msgbuf
->max_rxbufpost
, msgbuf
->max_eventbuf
,
1444 msgbuf
->max_ioctlrespbuf
);
1447 brcmf_msgbuf_rxbuf_data_fill(msgbuf
);
1448 if (msgbuf
->max_rxbufpost
!= msgbuf
->rxbufpost
)
1453 } while (count
< 10);
1454 brcmf_msgbuf_rxbuf_event_post(msgbuf
);
1455 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf
);
1457 INIT_WORK(&msgbuf
->flowring_work
, brcmf_msgbuf_flowring_worker
);
1458 spin_lock_init(&msgbuf
->flowring_work_lock
);
1459 INIT_LIST_HEAD(&msgbuf
->work_queue
);
1465 kfree(msgbuf
->flow_map
);
1466 kfree(msgbuf
->txstatus_done_map
);
1467 brcmf_msgbuf_release_pktids(msgbuf
);
1468 kfree(msgbuf
->flowring_dma_handle
);
1469 if (msgbuf
->ioctbuf
)
1470 dma_free_coherent(drvr
->bus_if
->dev
,
1471 BRCMF_TX_IOCTL_MAX_MSG_SIZE
,
1473 msgbuf
->ioctbuf_handle
);
1480 void brcmf_proto_msgbuf_detach(struct brcmf_pub
*drvr
)
1482 struct brcmf_msgbuf
*msgbuf
;
1483 struct brcmf_msgbuf_work_item
*work
;
1485 brcmf_dbg(TRACE
, "Enter\n");
1486 if (drvr
->proto
->pd
) {
1487 msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
1488 cancel_work_sync(&msgbuf
->flowring_work
);
1489 while (!list_empty(&msgbuf
->work_queue
)) {
1490 work
= list_first_entry(&msgbuf
->work_queue
,
1491 struct brcmf_msgbuf_work_item
,
1493 list_del(&work
->queue
);
1496 kfree(msgbuf
->flow_map
);
1497 kfree(msgbuf
->txstatus_done_map
);
1498 if (msgbuf
->txflow_wq
)
1499 destroy_workqueue(msgbuf
->txflow_wq
);
1501 brcmf_flowring_detach(msgbuf
->flow
);
1502 dma_free_coherent(drvr
->bus_if
->dev
,
1503 BRCMF_TX_IOCTL_MAX_MSG_SIZE
,
1504 msgbuf
->ioctbuf
, msgbuf
->ioctbuf_handle
);
1505 brcmf_msgbuf_release_pktids(msgbuf
);
1506 kfree(msgbuf
->flowring_dma_handle
);
1508 drvr
->proto
->pd
= NULL
;