/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16 /*******************************************************************************
17 * Communicates with the dongle by using dcmd codes.
18 * For certain dcmd codes, the dongle interprets string data from the host.
19 ******************************************************************************/
21 #include <linux/types.h>
22 #include <linux/netdevice.h>
24 #include <brcmu_utils.h>
25 #include <brcmu_wifi.h>
31 #include "commonring.h"
34 #include "tracepoint.h"
37 #define MSGBUF_IOCTL_RESP_TIMEOUT 2000
39 #define MSGBUF_TYPE_GEN_STATUS 0x1
40 #define MSGBUF_TYPE_RING_STATUS 0x2
41 #define MSGBUF_TYPE_FLOW_RING_CREATE 0x3
42 #define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT 0x4
43 #define MSGBUF_TYPE_FLOW_RING_DELETE 0x5
44 #define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT 0x6
45 #define MSGBUF_TYPE_FLOW_RING_FLUSH 0x7
46 #define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT 0x8
47 #define MSGBUF_TYPE_IOCTLPTR_REQ 0x9
48 #define MSGBUF_TYPE_IOCTLPTR_REQ_ACK 0xA
49 #define MSGBUF_TYPE_IOCTLRESP_BUF_POST 0xB
50 #define MSGBUF_TYPE_IOCTL_CMPLT 0xC
51 #define MSGBUF_TYPE_EVENT_BUF_POST 0xD
52 #define MSGBUF_TYPE_WL_EVENT 0xE
53 #define MSGBUF_TYPE_TX_POST 0xF
54 #define MSGBUF_TYPE_TX_STATUS 0x10
55 #define MSGBUF_TYPE_RXBUF_POST 0x11
56 #define MSGBUF_TYPE_RX_CMPLT 0x12
57 #define MSGBUF_TYPE_LPBK_DMAXFER 0x13
58 #define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14
60 #define NR_TX_PKTIDS 2048
61 #define NR_RX_PKTIDS 1024
63 #define BRCMF_IOCTL_REQ_PKTID 0xFFFE
65 #define BRCMF_MSGBUF_MAX_PKT_SIZE 2048
66 #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD 32
67 #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST 8
68 #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8
70 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01
71 #define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5
73 #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
74 #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
76 #define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
77 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
79 struct msgbuf_common_hdr
{
87 struct msgbuf_buf_addr
{
92 struct msgbuf_ioctl_req_hdr
{
93 struct msgbuf_common_hdr msg
;
97 __le16 output_buf_len
;
99 struct msgbuf_buf_addr req_buf_addr
;
103 struct msgbuf_tx_msghdr
{
104 struct msgbuf_common_hdr msg
;
108 struct msgbuf_buf_addr metadata_buf_addr
;
109 struct msgbuf_buf_addr data_buf_addr
;
110 __le16 metadata_buf_len
;
115 struct msgbuf_rx_bufpost
{
116 struct msgbuf_common_hdr msg
;
117 __le16 metadata_buf_len
;
120 struct msgbuf_buf_addr metadata_buf_addr
;
121 struct msgbuf_buf_addr data_buf_addr
;
124 struct msgbuf_rx_ioctl_resp_or_event
{
125 struct msgbuf_common_hdr msg
;
128 struct msgbuf_buf_addr host_buf_addr
;
132 struct msgbuf_completion_hdr
{
137 struct msgbuf_rx_event
{
138 struct msgbuf_common_hdr msg
;
139 struct msgbuf_completion_hdr compl_hdr
;
140 __le16 event_data_len
;
145 struct msgbuf_ioctl_resp_hdr
{
146 struct msgbuf_common_hdr msg
;
147 struct msgbuf_completion_hdr compl_hdr
;
154 struct msgbuf_tx_status
{
155 struct msgbuf_common_hdr msg
;
156 struct msgbuf_completion_hdr compl_hdr
;
161 struct msgbuf_rx_complete
{
162 struct msgbuf_common_hdr msg
;
163 struct msgbuf_completion_hdr compl_hdr
;
173 struct msgbuf_tx_flowring_create_req
{
174 struct msgbuf_common_hdr msg
;
185 struct msgbuf_buf_addr flow_ring_addr
;
188 struct msgbuf_tx_flowring_delete_req
{
189 struct msgbuf_common_hdr msg
;
195 struct msgbuf_flowring_create_resp
{
196 struct msgbuf_common_hdr msg
;
197 struct msgbuf_completion_hdr compl_hdr
;
201 struct msgbuf_flowring_delete_resp
{
202 struct msgbuf_common_hdr msg
;
203 struct msgbuf_completion_hdr compl_hdr
;
207 struct msgbuf_flowring_flush_resp
{
208 struct msgbuf_common_hdr msg
;
209 struct msgbuf_completion_hdr compl_hdr
;
213 struct brcmf_msgbuf_work_item
{
214 struct list_head queue
;
221 struct brcmf_msgbuf
{
222 struct brcmf_pub
*drvr
;
224 struct brcmf_commonring
**commonrings
;
225 struct brcmf_commonring
**flowrings
;
226 dma_addr_t
*flowring_dma_handle
;
231 u16 rx_metadata_offset
;
234 u32 max_ioctlrespbuf
;
235 u32 cur_ioctlrespbuf
;
240 dma_addr_t ioctbuf_handle
;
243 int ioctl_resp_status
;
244 u32 ioctl_resp_ret_len
;
245 u32 ioctl_resp_pktid
;
250 wait_queue_head_t ioctl_resp_wait
;
253 struct brcmf_msgbuf_pktids
*tx_pktids
;
254 struct brcmf_msgbuf_pktids
*rx_pktids
;
255 struct brcmf_flowring
*flow
;
257 struct workqueue_struct
*txflow_wq
;
258 struct work_struct txflow_work
;
259 unsigned long *flow_map
;
260 unsigned long *txstatus_done_map
;
262 struct work_struct flowring_work
;
263 spinlock_t flowring_work_lock
;
264 struct list_head work_queue
;
267 struct brcmf_msgbuf_pktid
{
274 struct brcmf_msgbuf_pktids
{
276 u32 last_allocated_idx
;
277 enum dma_data_direction direction
;
278 struct brcmf_msgbuf_pktid
*array
;
/* DMA flushing needs implementation for MIPS and ARM platforms. Should
 * be put in util. Note, this is not real flushing. It is virtual
 * non-cached memory. Only write buffers should have to be drained. Though
 * this may be different depending on platform.
 */
287 #define brcmf_dma_flush(addr, len)
288 #define brcmf_dma_invalidate_cache(addr, len)
291 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf
*msgbuf
);
294 static struct brcmf_msgbuf_pktids
*
295 brcmf_msgbuf_init_pktids(u32 nr_array_entries
,
296 enum dma_data_direction direction
)
298 struct brcmf_msgbuf_pktid
*array
;
299 struct brcmf_msgbuf_pktids
*pktids
;
301 array
= kcalloc(nr_array_entries
, sizeof(*array
), GFP_KERNEL
);
305 pktids
= kzalloc(sizeof(*pktids
), GFP_KERNEL
);
310 pktids
->array
= array
;
311 pktids
->array_size
= nr_array_entries
;
318 brcmf_msgbuf_alloc_pktid(struct device
*dev
,
319 struct brcmf_msgbuf_pktids
*pktids
,
320 struct sk_buff
*skb
, u16 data_offset
,
321 dma_addr_t
*physaddr
, u32
*idx
)
323 struct brcmf_msgbuf_pktid
*array
;
326 array
= pktids
->array
;
328 *physaddr
= dma_map_single(dev
, skb
->data
+ data_offset
,
329 skb
->len
- data_offset
, pktids
->direction
);
331 if (dma_mapping_error(dev
, *physaddr
)) {
332 brcmf_err("dma_map_single failed !!\n");
336 *idx
= pktids
->last_allocated_idx
;
341 if (*idx
== pktids
->array_size
)
343 if (array
[*idx
].allocated
.counter
== 0)
344 if (atomic_cmpxchg(&array
[*idx
].allocated
, 0, 1) == 0)
347 } while (count
< pktids
->array_size
);
349 if (count
== pktids
->array_size
)
352 array
[*idx
].data_offset
= data_offset
;
353 array
[*idx
].physaddr
= *physaddr
;
354 array
[*idx
].skb
= skb
;
356 pktids
->last_allocated_idx
= *idx
;
362 static struct sk_buff
*
363 brcmf_msgbuf_get_pktid(struct device
*dev
, struct brcmf_msgbuf_pktids
*pktids
,
366 struct brcmf_msgbuf_pktid
*pktid
;
369 if (idx
>= pktids
->array_size
) {
370 brcmf_err("Invalid packet id %d (max %d)\n", idx
,
374 if (pktids
->array
[idx
].allocated
.counter
) {
375 pktid
= &pktids
->array
[idx
];
376 dma_unmap_single(dev
, pktid
->physaddr
,
377 pktid
->skb
->len
- pktid
->data_offset
,
380 pktid
->allocated
.counter
= 0;
383 brcmf_err("Invalid packet id %d (not in use)\n", idx
);
391 brcmf_msgbuf_release_array(struct device
*dev
,
392 struct brcmf_msgbuf_pktids
*pktids
)
394 struct brcmf_msgbuf_pktid
*array
;
395 struct brcmf_msgbuf_pktid
*pktid
;
398 array
= pktids
->array
;
401 if (array
[count
].allocated
.counter
) {
402 pktid
= &array
[count
];
403 dma_unmap_single(dev
, pktid
->physaddr
,
404 pktid
->skb
->len
- pktid
->data_offset
,
406 brcmu_pkt_buf_free_skb(pktid
->skb
);
409 } while (count
< pktids
->array_size
);
416 static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf
*msgbuf
)
418 if (msgbuf
->rx_pktids
)
419 brcmf_msgbuf_release_array(msgbuf
->drvr
->bus_if
->dev
,
421 if (msgbuf
->tx_pktids
)
422 brcmf_msgbuf_release_array(msgbuf
->drvr
->bus_if
->dev
,
427 static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub
*drvr
, int ifidx
,
428 uint cmd
, void *buf
, uint len
)
430 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
431 struct brcmf_commonring
*commonring
;
432 struct msgbuf_ioctl_req_hdr
*request
;
437 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
438 brcmf_commonring_lock(commonring
);
439 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
441 brcmf_err("Failed to reserve space in commonring\n");
442 brcmf_commonring_unlock(commonring
);
448 request
= (struct msgbuf_ioctl_req_hdr
*)ret_ptr
;
449 request
->msg
.msgtype
= MSGBUF_TYPE_IOCTLPTR_REQ
;
450 request
->msg
.ifidx
= (u8
)ifidx
;
451 request
->msg
.flags
= 0;
452 request
->msg
.request_id
= cpu_to_le32(BRCMF_IOCTL_REQ_PKTID
);
453 request
->cmd
= cpu_to_le32(cmd
);
454 request
->output_buf_len
= cpu_to_le16(len
);
455 request
->trans_id
= cpu_to_le16(msgbuf
->reqid
);
457 buf_len
= min_t(u16
, len
, BRCMF_TX_IOCTL_MAX_MSG_SIZE
);
458 request
->input_buf_len
= cpu_to_le16(buf_len
);
459 request
->req_buf_addr
.high_addr
= cpu_to_le32(msgbuf
->ioctbuf_phys_hi
);
460 request
->req_buf_addr
.low_addr
= cpu_to_le32(msgbuf
->ioctbuf_phys_lo
);
462 memcpy(msgbuf
->ioctbuf
, buf
, buf_len
);
464 memset(msgbuf
->ioctbuf
, 0, buf_len
);
465 brcmf_dma_flush(ioctl_buf
, buf_len
);
467 err
= brcmf_commonring_write_complete(commonring
);
468 brcmf_commonring_unlock(commonring
);
474 static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf
*msgbuf
)
476 return wait_event_timeout(msgbuf
->ioctl_resp_wait
,
477 msgbuf
->ctl_completed
,
478 msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT
));
482 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf
*msgbuf
)
484 msgbuf
->ctl_completed
= true;
485 if (waitqueue_active(&msgbuf
->ioctl_resp_wait
))
486 wake_up(&msgbuf
->ioctl_resp_wait
);
490 static int brcmf_msgbuf_query_dcmd(struct brcmf_pub
*drvr
, int ifidx
,
491 uint cmd
, void *buf
, uint len
)
493 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
494 struct sk_buff
*skb
= NULL
;
498 brcmf_dbg(MSGBUF
, "ifidx=%d, cmd=%d, len=%d\n", ifidx
, cmd
, len
);
499 msgbuf
->ctl_completed
= false;
500 err
= brcmf_msgbuf_tx_ioctl(drvr
, ifidx
, cmd
, buf
, len
);
504 timeout
= brcmf_msgbuf_ioctl_resp_wait(msgbuf
);
506 brcmf_err("Timeout on response for query command\n");
510 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
512 msgbuf
->ioctl_resp_pktid
);
513 if (msgbuf
->ioctl_resp_ret_len
!= 0) {
515 brcmf_err("Invalid packet id idx recv'd %d\n",
516 msgbuf
->ioctl_resp_pktid
);
519 memcpy(buf
, skb
->data
, (len
< msgbuf
->ioctl_resp_ret_len
) ?
520 len
: msgbuf
->ioctl_resp_ret_len
);
522 brcmu_pkt_buf_free_skb(skb
);
524 return msgbuf
->ioctl_resp_status
;
528 static int brcmf_msgbuf_set_dcmd(struct brcmf_pub
*drvr
, int ifidx
,
529 uint cmd
, void *buf
, uint len
)
531 return brcmf_msgbuf_query_dcmd(drvr
, ifidx
, cmd
, buf
, len
);
535 static int brcmf_msgbuf_hdrpull(struct brcmf_pub
*drvr
, bool do_fws
,
536 u8
*ifidx
, struct sk_buff
*skb
)
543 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf
*msgbuf
, u16 flowid
)
548 brcmf_dbg(MSGBUF
, "Removing flowring %d\n", flowid
);
550 dma_sz
= BRCMF_H2D_TXFLOWRING_MAX_ITEM
* BRCMF_H2D_TXFLOWRING_ITEMSIZE
;
551 dma_buf
= msgbuf
->flowrings
[flowid
]->buf_addr
;
552 dma_free_coherent(msgbuf
->drvr
->bus_if
->dev
, dma_sz
, dma_buf
,
553 msgbuf
->flowring_dma_handle
[flowid
]);
555 brcmf_flowring_delete(msgbuf
->flow
, flowid
);
559 static struct brcmf_msgbuf_work_item
*
560 brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf
*msgbuf
)
562 struct brcmf_msgbuf_work_item
*work
= NULL
;
565 spin_lock_irqsave(&msgbuf
->flowring_work_lock
, flags
);
566 if (!list_empty(&msgbuf
->work_queue
)) {
567 work
= list_first_entry(&msgbuf
->work_queue
,
568 struct brcmf_msgbuf_work_item
, queue
);
569 list_del(&work
->queue
);
571 spin_unlock_irqrestore(&msgbuf
->flowring_work_lock
, flags
);
578 brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf
*msgbuf
,
579 struct brcmf_msgbuf_work_item
*work
)
581 struct msgbuf_tx_flowring_create_req
*create
;
582 struct brcmf_commonring
*commonring
;
590 flowid
= work
->flowid
;
591 dma_sz
= BRCMF_H2D_TXFLOWRING_MAX_ITEM
* BRCMF_H2D_TXFLOWRING_ITEMSIZE
;
592 dma_buf
= dma_alloc_coherent(msgbuf
->drvr
->bus_if
->dev
, dma_sz
,
593 &msgbuf
->flowring_dma_handle
[flowid
],
596 brcmf_err("dma_alloc_coherent failed\n");
597 brcmf_flowring_delete(msgbuf
->flow
, flowid
);
598 return BRCMF_FLOWRING_INVALID_ID
;
601 brcmf_commonring_config(msgbuf
->flowrings
[flowid
],
602 BRCMF_H2D_TXFLOWRING_MAX_ITEM
,
603 BRCMF_H2D_TXFLOWRING_ITEMSIZE
, dma_buf
);
605 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
606 brcmf_commonring_lock(commonring
);
607 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
609 brcmf_err("Failed to reserve space in commonring\n");
610 brcmf_commonring_unlock(commonring
);
611 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
612 return BRCMF_FLOWRING_INVALID_ID
;
615 create
= (struct msgbuf_tx_flowring_create_req
*)ret_ptr
;
616 create
->msg
.msgtype
= MSGBUF_TYPE_FLOW_RING_CREATE
;
617 create
->msg
.ifidx
= work
->ifidx
;
618 create
->msg
.request_id
= 0;
619 create
->tid
= brcmf_flowring_tid(msgbuf
->flow
, flowid
);
620 create
->flow_ring_id
= cpu_to_le16(flowid
+
621 BRCMF_NROF_H2D_COMMON_MSGRINGS
);
622 memcpy(create
->sa
, work
->sa
, ETH_ALEN
);
623 memcpy(create
->da
, work
->da
, ETH_ALEN
);
624 address
= (u64
)msgbuf
->flowring_dma_handle
[flowid
];
625 create
->flow_ring_addr
.high_addr
= cpu_to_le32(address
>> 32);
626 create
->flow_ring_addr
.low_addr
= cpu_to_le32(address
& 0xffffffff);
627 create
->max_items
= cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM
);
628 create
->len_item
= cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE
);
630 brcmf_dbg(MSGBUF
, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
631 flowid
, work
->da
, create
->tid
, work
->ifidx
);
633 err
= brcmf_commonring_write_complete(commonring
);
634 brcmf_commonring_unlock(commonring
);
636 brcmf_err("Failed to write commonring\n");
637 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
638 return BRCMF_FLOWRING_INVALID_ID
;
645 static void brcmf_msgbuf_flowring_worker(struct work_struct
*work
)
647 struct brcmf_msgbuf
*msgbuf
;
648 struct brcmf_msgbuf_work_item
*create
;
650 msgbuf
= container_of(work
, struct brcmf_msgbuf
, flowring_work
);
652 while ((create
= brcmf_msgbuf_dequeue_work(msgbuf
))) {
653 brcmf_msgbuf_flowring_create_worker(msgbuf
, create
);
659 static u32
brcmf_msgbuf_flowring_create(struct brcmf_msgbuf
*msgbuf
, int ifidx
,
662 struct brcmf_msgbuf_work_item
*create
;
663 struct ethhdr
*eh
= (struct ethhdr
*)(skb
->data
);
667 create
= kzalloc(sizeof(*create
), GFP_ATOMIC
);
669 return BRCMF_FLOWRING_INVALID_ID
;
671 flowid
= brcmf_flowring_create(msgbuf
->flow
, eh
->h_dest
,
672 skb
->priority
, ifidx
);
673 if (flowid
== BRCMF_FLOWRING_INVALID_ID
) {
678 create
->flowid
= flowid
;
679 create
->ifidx
= ifidx
;
680 memcpy(create
->sa
, eh
->h_source
, ETH_ALEN
);
681 memcpy(create
->da
, eh
->h_dest
, ETH_ALEN
);
683 spin_lock_irqsave(&msgbuf
->flowring_work_lock
, flags
);
684 list_add_tail(&create
->queue
, &msgbuf
->work_queue
);
685 spin_unlock_irqrestore(&msgbuf
->flowring_work_lock
, flags
);
686 schedule_work(&msgbuf
->flowring_work
);
692 static void brcmf_msgbuf_txflow(struct brcmf_msgbuf
*msgbuf
, u8 flowid
)
694 struct brcmf_flowring
*flow
= msgbuf
->flow
;
695 struct brcmf_commonring
*commonring
;
701 struct msgbuf_tx_msghdr
*tx_msghdr
;
704 commonring
= msgbuf
->flowrings
[flowid
];
705 if (!brcmf_commonring_write_available(commonring
))
708 brcmf_commonring_lock(commonring
);
710 count
= BRCMF_MSGBUF_TX_FLUSH_CNT2
- BRCMF_MSGBUF_TX_FLUSH_CNT1
;
711 while (brcmf_flowring_qlen(flow
, flowid
)) {
712 skb
= brcmf_flowring_dequeue(flow
, flowid
);
714 brcmf_err("No SKB, but qlen %d\n",
715 brcmf_flowring_qlen(flow
, flowid
));
719 if (brcmf_msgbuf_alloc_pktid(msgbuf
->drvr
->bus_if
->dev
,
720 msgbuf
->tx_pktids
, skb
, ETH_HLEN
,
721 &physaddr
, &pktid
)) {
722 brcmf_flowring_reinsert(flow
, flowid
, skb
);
723 brcmf_err("No PKTID available !!\n");
726 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
728 brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
729 msgbuf
->tx_pktids
, pktid
);
730 brcmf_flowring_reinsert(flow
, flowid
, skb
);
735 tx_msghdr
= (struct msgbuf_tx_msghdr
*)ret_ptr
;
737 tx_msghdr
->msg
.msgtype
= MSGBUF_TYPE_TX_POST
;
738 tx_msghdr
->msg
.request_id
= cpu_to_le32(pktid
);
739 tx_msghdr
->msg
.ifidx
= brcmf_flowring_ifidx_get(flow
, flowid
);
740 tx_msghdr
->flags
= BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3
;
741 tx_msghdr
->flags
|= (skb
->priority
& 0x07) <<
742 BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT
;
743 tx_msghdr
->seg_cnt
= 1;
744 memcpy(tx_msghdr
->txhdr
, skb
->data
, ETH_HLEN
);
745 tx_msghdr
->data_len
= cpu_to_le16(skb
->len
- ETH_HLEN
);
746 address
= (u64
)physaddr
;
747 tx_msghdr
->data_buf_addr
.high_addr
= cpu_to_le32(address
>> 32);
748 tx_msghdr
->data_buf_addr
.low_addr
=
749 cpu_to_le32(address
& 0xffffffff);
750 tx_msghdr
->metadata_buf_len
= 0;
751 tx_msghdr
->metadata_buf_addr
.high_addr
= 0;
752 tx_msghdr
->metadata_buf_addr
.low_addr
= 0;
753 atomic_inc(&commonring
->outstanding_tx
);
754 if (count
>= BRCMF_MSGBUF_TX_FLUSH_CNT2
) {
755 brcmf_commonring_write_complete(commonring
);
760 brcmf_commonring_write_complete(commonring
);
761 brcmf_commonring_unlock(commonring
);
765 static void brcmf_msgbuf_txflow_worker(struct work_struct
*worker
)
767 struct brcmf_msgbuf
*msgbuf
;
770 msgbuf
= container_of(worker
, struct brcmf_msgbuf
, txflow_work
);
771 for_each_set_bit(flowid
, msgbuf
->flow_map
, msgbuf
->nrof_flowrings
) {
772 clear_bit(flowid
, msgbuf
->flow_map
);
773 brcmf_msgbuf_txflow(msgbuf
, flowid
);
778 static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf
*msgbuf
, u32 flowid
,
781 struct brcmf_commonring
*commonring
;
783 set_bit(flowid
, msgbuf
->flow_map
);
784 commonring
= msgbuf
->flowrings
[flowid
];
785 if ((force
) || (atomic_read(&commonring
->outstanding_tx
) <
786 BRCMF_MSGBUF_DELAY_TXWORKER_THRS
))
787 queue_work(msgbuf
->txflow_wq
, &msgbuf
->txflow_work
);
793 static int brcmf_msgbuf_txdata(struct brcmf_pub
*drvr
, int ifidx
,
794 u8 offset
, struct sk_buff
*skb
)
796 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
797 struct brcmf_flowring
*flow
= msgbuf
->flow
;
798 struct ethhdr
*eh
= (struct ethhdr
*)(skb
->data
);
801 flowid
= brcmf_flowring_lookup(flow
, eh
->h_dest
, skb
->priority
, ifidx
);
802 if (flowid
== BRCMF_FLOWRING_INVALID_ID
) {
803 flowid
= brcmf_msgbuf_flowring_create(msgbuf
, ifidx
, skb
);
804 if (flowid
== BRCMF_FLOWRING_INVALID_ID
)
807 brcmf_flowring_enqueue(flow
, flowid
, skb
);
808 brcmf_msgbuf_schedule_txdata(msgbuf
, flowid
, false);
815 brcmf_msgbuf_configure_addr_mode(struct brcmf_pub
*drvr
, int ifidx
,
816 enum proto_addr_mode addr_mode
)
818 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
820 brcmf_flowring_configure_addr_mode(msgbuf
->flow
, ifidx
, addr_mode
);
825 brcmf_msgbuf_delete_peer(struct brcmf_pub
*drvr
, int ifidx
, u8 peer
[ETH_ALEN
])
827 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
829 brcmf_flowring_delete_peer(msgbuf
->flow
, ifidx
, peer
);
834 brcmf_msgbuf_add_tdls_peer(struct brcmf_pub
*drvr
, int ifidx
, u8 peer
[ETH_ALEN
])
836 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
838 brcmf_flowring_add_tdls_peer(msgbuf
->flow
, ifidx
, peer
);
843 brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf
*msgbuf
, void *buf
)
845 struct msgbuf_ioctl_resp_hdr
*ioctl_resp
;
847 ioctl_resp
= (struct msgbuf_ioctl_resp_hdr
*)buf
;
849 msgbuf
->ioctl_resp_status
=
850 (s16
)le16_to_cpu(ioctl_resp
->compl_hdr
.status
);
851 msgbuf
->ioctl_resp_ret_len
= le16_to_cpu(ioctl_resp
->resp_len
);
852 msgbuf
->ioctl_resp_pktid
= le32_to_cpu(ioctl_resp
->msg
.request_id
);
854 brcmf_msgbuf_ioctl_resp_wake(msgbuf
);
856 if (msgbuf
->cur_ioctlrespbuf
)
857 msgbuf
->cur_ioctlrespbuf
--;
858 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf
);
863 brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf
*msgbuf
, void *buf
)
865 struct brcmf_commonring
*commonring
;
866 struct msgbuf_tx_status
*tx_status
;
871 tx_status
= (struct msgbuf_tx_status
*)buf
;
872 idx
= le32_to_cpu(tx_status
->msg
.request_id
);
873 flowid
= le16_to_cpu(tx_status
->compl_hdr
.flow_ring_id
);
874 flowid
-= BRCMF_NROF_H2D_COMMON_MSGRINGS
;
875 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
876 msgbuf
->tx_pktids
, idx
);
878 brcmf_err("Invalid packet id idx recv'd %d\n", idx
);
882 set_bit(flowid
, msgbuf
->txstatus_done_map
);
883 commonring
= msgbuf
->flowrings
[flowid
];
884 atomic_dec(&commonring
->outstanding_tx
);
886 brcmf_txfinalize(msgbuf
->drvr
, skb
, tx_status
->msg
.ifidx
, true);
890 static u32
brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf
*msgbuf
, u32 count
)
892 struct brcmf_commonring
*commonring
;
898 struct msgbuf_rx_bufpost
*rx_bufpost
;
903 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_RXPOST_SUBMIT
];
904 ret_ptr
= brcmf_commonring_reserve_for_write_multiple(commonring
,
908 brcmf_dbg(MSGBUF
, "Failed to reserve space in commonring\n");
912 for (i
= 0; i
< alloced
; i
++) {
913 rx_bufpost
= (struct msgbuf_rx_bufpost
*)ret_ptr
;
914 memset(rx_bufpost
, 0, sizeof(*rx_bufpost
));
916 skb
= brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE
);
919 brcmf_err("Failed to alloc SKB\n");
920 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
925 if (brcmf_msgbuf_alloc_pktid(msgbuf
->drvr
->bus_if
->dev
,
926 msgbuf
->rx_pktids
, skb
, 0,
927 &physaddr
, &pktid
)) {
928 dev_kfree_skb_any(skb
);
929 brcmf_err("No PKTID available !!\n");
930 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
934 if (msgbuf
->rx_metadata_offset
) {
935 address
= (u64
)physaddr
;
936 rx_bufpost
->metadata_buf_len
=
937 cpu_to_le16(msgbuf
->rx_metadata_offset
);
938 rx_bufpost
->metadata_buf_addr
.high_addr
=
939 cpu_to_le32(address
>> 32);
940 rx_bufpost
->metadata_buf_addr
.low_addr
=
941 cpu_to_le32(address
& 0xffffffff);
943 skb_pull(skb
, msgbuf
->rx_metadata_offset
);
945 physaddr
+= msgbuf
->rx_metadata_offset
;
947 rx_bufpost
->msg
.msgtype
= MSGBUF_TYPE_RXBUF_POST
;
948 rx_bufpost
->msg
.request_id
= cpu_to_le32(pktid
);
950 address
= (u64
)physaddr
;
951 rx_bufpost
->data_buf_len
= cpu_to_le16((u16
)pktlen
);
952 rx_bufpost
->data_buf_addr
.high_addr
=
953 cpu_to_le32(address
>> 32);
954 rx_bufpost
->data_buf_addr
.low_addr
=
955 cpu_to_le32(address
& 0xffffffff);
957 ret_ptr
+= brcmf_commonring_len_item(commonring
);
961 brcmf_commonring_write_complete(commonring
);
968 brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf
*msgbuf
)
973 fillbufs
= msgbuf
->max_rxbufpost
- msgbuf
->rxbufpost
;
976 retcount
= brcmf_msgbuf_rxbuf_data_post(msgbuf
, fillbufs
);
979 msgbuf
->rxbufpost
+= retcount
;
980 fillbufs
-= retcount
;
986 brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf
*msgbuf
, u16 rxcnt
)
988 msgbuf
->rxbufpost
-= rxcnt
;
989 if (msgbuf
->rxbufpost
<= (msgbuf
->max_rxbufpost
-
990 BRCMF_MSGBUF_RXBUFPOST_THRESHOLD
))
991 brcmf_msgbuf_rxbuf_data_fill(msgbuf
);
996 brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf
*msgbuf
, bool event_buf
,
999 struct brcmf_commonring
*commonring
;
1001 struct sk_buff
*skb
;
1004 dma_addr_t physaddr
;
1005 struct msgbuf_rx_ioctl_resp_or_event
*rx_bufpost
;
1010 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
1011 brcmf_commonring_lock(commonring
);
1012 ret_ptr
= brcmf_commonring_reserve_for_write_multiple(commonring
,
1016 brcmf_err("Failed to reserve space in commonring\n");
1017 brcmf_commonring_unlock(commonring
);
1021 for (i
= 0; i
< alloced
; i
++) {
1022 rx_bufpost
= (struct msgbuf_rx_ioctl_resp_or_event
*)ret_ptr
;
1023 memset(rx_bufpost
, 0, sizeof(*rx_bufpost
));
1025 skb
= brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE
);
1028 brcmf_err("Failed to alloc SKB\n");
1029 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
1034 if (brcmf_msgbuf_alloc_pktid(msgbuf
->drvr
->bus_if
->dev
,
1035 msgbuf
->rx_pktids
, skb
, 0,
1036 &physaddr
, &pktid
)) {
1037 dev_kfree_skb_any(skb
);
1038 brcmf_err("No PKTID available !!\n");
1039 brcmf_commonring_write_cancel(commonring
, alloced
- i
);
1043 rx_bufpost
->msg
.msgtype
= MSGBUF_TYPE_EVENT_BUF_POST
;
1045 rx_bufpost
->msg
.msgtype
=
1046 MSGBUF_TYPE_IOCTLRESP_BUF_POST
;
1047 rx_bufpost
->msg
.request_id
= cpu_to_le32(pktid
);
1049 address
= (u64
)physaddr
;
1050 rx_bufpost
->host_buf_len
= cpu_to_le16((u16
)pktlen
);
1051 rx_bufpost
->host_buf_addr
.high_addr
=
1052 cpu_to_le32(address
>> 32);
1053 rx_bufpost
->host_buf_addr
.low_addr
=
1054 cpu_to_le32(address
& 0xffffffff);
1056 ret_ptr
+= brcmf_commonring_len_item(commonring
);
1060 brcmf_commonring_write_complete(commonring
);
1062 brcmf_commonring_unlock(commonring
);
1068 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf
*msgbuf
)
1072 count
= msgbuf
->max_ioctlrespbuf
- msgbuf
->cur_ioctlrespbuf
;
1073 count
= brcmf_msgbuf_rxbuf_ctrl_post(msgbuf
, false, count
);
1074 msgbuf
->cur_ioctlrespbuf
+= count
;
1078 static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf
*msgbuf
)
1082 count
= msgbuf
->max_eventbuf
- msgbuf
->cur_eventbuf
;
1083 count
= brcmf_msgbuf_rxbuf_ctrl_post(msgbuf
, true, count
);
1084 msgbuf
->cur_eventbuf
+= count
;
1089 brcmf_msgbuf_rx_skb(struct brcmf_msgbuf
*msgbuf
, struct sk_buff
*skb
,
1092 struct brcmf_if
*ifp
;
	/* The ifidx is the index used to map to the matching netdev/ifp. When
	 * receiving events this is easy because it contains the bssidx which
	 * maps 1-on-1 to the netdev/ifp. But for data frames the received
	 * ifidx does not. bssidx 1 is used for p2p0 and no data can be
	 * received or transmitted on it. Therefore bssidx is ifidx + 1 if
	 * ifidx > 0.
	 */
1102 ifp
= msgbuf
->drvr
->iflist
[ifidx
];
1103 if (!ifp
|| !ifp
->ndev
) {
1104 brcmf_err("Received pkt for invalid ifidx %d\n", ifidx
);
1105 brcmu_pkt_buf_free_skb(skb
);
1108 brcmf_netif_rx(ifp
, skb
);
1112 static void brcmf_msgbuf_process_event(struct brcmf_msgbuf
*msgbuf
, void *buf
)
1114 struct msgbuf_rx_event
*event
;
1117 struct sk_buff
*skb
;
1119 event
= (struct msgbuf_rx_event
*)buf
;
1120 idx
= le32_to_cpu(event
->msg
.request_id
);
1121 buflen
= le16_to_cpu(event
->event_data_len
);
1123 if (msgbuf
->cur_eventbuf
)
1124 msgbuf
->cur_eventbuf
--;
1125 brcmf_msgbuf_rxbuf_event_post(msgbuf
);
1127 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
1128 msgbuf
->rx_pktids
, idx
);
1132 if (msgbuf
->rx_dataoffset
)
1133 skb_pull(skb
, msgbuf
->rx_dataoffset
);
1135 skb_trim(skb
, buflen
);
1137 brcmf_msgbuf_rx_skb(msgbuf
, skb
, event
->msg
.ifidx
);
1142 brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf
*msgbuf
, void *buf
)
1144 struct msgbuf_rx_complete
*rx_complete
;
1145 struct sk_buff
*skb
;
1150 brcmf_msgbuf_update_rxbufpost_count(msgbuf
, 1);
1152 rx_complete
= (struct msgbuf_rx_complete
*)buf
;
1153 data_offset
= le16_to_cpu(rx_complete
->data_offset
);
1154 buflen
= le16_to_cpu(rx_complete
->data_len
);
1155 idx
= le32_to_cpu(rx_complete
->msg
.request_id
);
1157 skb
= brcmf_msgbuf_get_pktid(msgbuf
->drvr
->bus_if
->dev
,
1158 msgbuf
->rx_pktids
, idx
);
1161 skb_pull(skb
, data_offset
);
1162 else if (msgbuf
->rx_dataoffset
)
1163 skb_pull(skb
, msgbuf
->rx_dataoffset
);
1165 skb_trim(skb
, buflen
);
1167 brcmf_msgbuf_rx_skb(msgbuf
, skb
, rx_complete
->msg
.ifidx
);
1172 brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf
*msgbuf
,
1175 struct msgbuf_flowring_create_resp
*flowring_create_resp
;
1179 flowring_create_resp
= (struct msgbuf_flowring_create_resp
*)buf
;
1181 flowid
= le16_to_cpu(flowring_create_resp
->compl_hdr
.flow_ring_id
);
1182 flowid
-= BRCMF_NROF_H2D_COMMON_MSGRINGS
;
1183 status
= le16_to_cpu(flowring_create_resp
->compl_hdr
.status
);
1186 brcmf_err("Flowring creation failed, code %d\n", status
);
1187 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1190 brcmf_dbg(MSGBUF
, "Flowring %d Create response status %d\n", flowid
,
1193 brcmf_flowring_open(msgbuf
->flow
, flowid
);
1195 brcmf_msgbuf_schedule_txdata(msgbuf
, flowid
, true);
1200 brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf
*msgbuf
,
1203 struct msgbuf_flowring_delete_resp
*flowring_delete_resp
;
1207 flowring_delete_resp
= (struct msgbuf_flowring_delete_resp
*)buf
;
1209 flowid
= le16_to_cpu(flowring_delete_resp
->compl_hdr
.flow_ring_id
);
1210 flowid
-= BRCMF_NROF_H2D_COMMON_MSGRINGS
;
1211 status
= le16_to_cpu(flowring_delete_resp
->compl_hdr
.status
);
1214 brcmf_err("Flowring deletion failed, code %d\n", status
);
1215 brcmf_flowring_delete(msgbuf
->flow
, flowid
);
1218 brcmf_dbg(MSGBUF
, "Flowring %d Delete response status %d\n", flowid
,
1221 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1225 static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf
*msgbuf
, void *buf
)
1227 struct msgbuf_common_hdr
*msg
;
1229 msg
= (struct msgbuf_common_hdr
*)buf
;
1230 switch (msg
->msgtype
) {
1231 case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT
:
1232 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
1233 brcmf_msgbuf_process_flow_ring_create_response(msgbuf
, buf
);
1235 case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT
:
1236 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
1237 brcmf_msgbuf_process_flow_ring_delete_response(msgbuf
, buf
);
1239 case MSGBUF_TYPE_IOCTLPTR_REQ_ACK
:
1240 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
1242 case MSGBUF_TYPE_IOCTL_CMPLT
:
1243 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_IOCTL_CMPLT\n");
1244 brcmf_msgbuf_process_ioctl_complete(msgbuf
, buf
);
1246 case MSGBUF_TYPE_WL_EVENT
:
1247 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_WL_EVENT\n");
1248 brcmf_msgbuf_process_event(msgbuf
, buf
);
1250 case MSGBUF_TYPE_TX_STATUS
:
1251 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_TX_STATUS\n");
1252 brcmf_msgbuf_process_txstatus(msgbuf
, buf
);
1254 case MSGBUF_TYPE_RX_CMPLT
:
1255 brcmf_dbg(MSGBUF
, "MSGBUF_TYPE_RX_CMPLT\n");
1256 brcmf_msgbuf_process_rx_complete(msgbuf
, buf
);
1259 brcmf_err("Unsupported msgtype %d\n", msg
->msgtype
);
1265 static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf
*msgbuf
,
1266 struct brcmf_commonring
*commonring
)
1272 buf
= brcmf_commonring_get_read_ptr(commonring
, &count
);
1277 brcmf_msgbuf_process_msgtype(msgbuf
,
1278 buf
+ msgbuf
->rx_dataoffset
);
1279 buf
+= brcmf_commonring_len_item(commonring
);
1282 brcmf_commonring_read_complete(commonring
);
1284 if (commonring
->r_ptr
== 0)
1289 int brcmf_proto_msgbuf_rx_trigger(struct device
*dev
)
1291 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
1292 struct brcmf_pub
*drvr
= bus_if
->drvr
;
1293 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
1294 struct brcmf_commonring
*commonring
;
1299 buf
= msgbuf
->commonrings
[BRCMF_D2H_MSGRING_RX_COMPLETE
];
1300 brcmf_msgbuf_process_rx(msgbuf
, buf
);
1301 buf
= msgbuf
->commonrings
[BRCMF_D2H_MSGRING_TX_COMPLETE
];
1302 brcmf_msgbuf_process_rx(msgbuf
, buf
);
1303 buf
= msgbuf
->commonrings
[BRCMF_D2H_MSGRING_CONTROL_COMPLETE
];
1304 brcmf_msgbuf_process_rx(msgbuf
, buf
);
1306 for_each_set_bit(flowid
, msgbuf
->txstatus_done_map
,
1307 msgbuf
->nrof_flowrings
) {
1308 clear_bit(flowid
, msgbuf
->txstatus_done_map
);
1309 commonring
= msgbuf
->flowrings
[flowid
];
1310 qlen
= brcmf_flowring_qlen(msgbuf
->flow
, flowid
);
1311 if ((qlen
> BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS
) ||
1312 ((qlen
) && (atomic_read(&commonring
->outstanding_tx
) <
1313 BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS
)))
1314 brcmf_msgbuf_schedule_txdata(msgbuf
, flowid
, true);
1321 void brcmf_msgbuf_delete_flowring(struct brcmf_pub
*drvr
, u8 flowid
)
1323 struct brcmf_msgbuf
*msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
1324 struct msgbuf_tx_flowring_delete_req
*delete;
1325 struct brcmf_commonring
*commonring
;
1330 commonring
= msgbuf
->commonrings
[BRCMF_H2D_MSGRING_CONTROL_SUBMIT
];
1331 brcmf_commonring_lock(commonring
);
1332 ret_ptr
= brcmf_commonring_reserve_for_write(commonring
);
1334 brcmf_err("FW unaware, flowring will be removed !!\n");
1335 brcmf_commonring_unlock(commonring
);
1336 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1340 delete = (struct msgbuf_tx_flowring_delete_req
*)ret_ptr
;
1342 ifidx
= brcmf_flowring_ifidx_get(msgbuf
->flow
, flowid
);
1344 delete->msg
.msgtype
= MSGBUF_TYPE_FLOW_RING_DELETE
;
1345 delete->msg
.ifidx
= ifidx
;
1346 delete->msg
.request_id
= 0;
1348 delete->flow_ring_id
= cpu_to_le16(flowid
+
1349 BRCMF_NROF_H2D_COMMON_MSGRINGS
);
1352 brcmf_dbg(MSGBUF
, "Send Flow Delete Req flow ID %d, ifindex %d\n",
1355 err
= brcmf_commonring_write_complete(commonring
);
1356 brcmf_commonring_unlock(commonring
);
1358 brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
1359 brcmf_msgbuf_remove_flowring(msgbuf
, flowid
);
1364 int brcmf_proto_msgbuf_attach(struct brcmf_pub
*drvr
)
1366 struct brcmf_bus_msgbuf
*if_msgbuf
;
1367 struct brcmf_msgbuf
*msgbuf
;
1371 if_msgbuf
= drvr
->bus_if
->msgbuf
;
1372 msgbuf
= kzalloc(sizeof(*msgbuf
), GFP_KERNEL
);
1376 msgbuf
->txflow_wq
= create_singlethread_workqueue("msgbuf_txflow");
1377 if (msgbuf
->txflow_wq
== NULL
) {
1378 brcmf_err("workqueue creation failed\n");
1381 INIT_WORK(&msgbuf
->txflow_work
, brcmf_msgbuf_txflow_worker
);
1382 count
= BITS_TO_LONGS(if_msgbuf
->nrof_flowrings
);
1383 count
= count
* sizeof(unsigned long);
1384 msgbuf
->flow_map
= kzalloc(count
, GFP_KERNEL
);
1385 if (!msgbuf
->flow_map
)
1388 msgbuf
->txstatus_done_map
= kzalloc(count
, GFP_KERNEL
);
1389 if (!msgbuf
->txstatus_done_map
)
1392 msgbuf
->drvr
= drvr
;
1393 msgbuf
->ioctbuf
= dma_alloc_coherent(drvr
->bus_if
->dev
,
1394 BRCMF_TX_IOCTL_MAX_MSG_SIZE
,
1395 &msgbuf
->ioctbuf_handle
,
1397 if (!msgbuf
->ioctbuf
)
1399 address
= (u64
)msgbuf
->ioctbuf_handle
;
1400 msgbuf
->ioctbuf_phys_hi
= address
>> 32;
1401 msgbuf
->ioctbuf_phys_lo
= address
& 0xffffffff;
1403 drvr
->proto
->hdrpull
= brcmf_msgbuf_hdrpull
;
1404 drvr
->proto
->query_dcmd
= brcmf_msgbuf_query_dcmd
;
1405 drvr
->proto
->set_dcmd
= brcmf_msgbuf_set_dcmd
;
1406 drvr
->proto
->txdata
= brcmf_msgbuf_txdata
;
1407 drvr
->proto
->configure_addr_mode
= brcmf_msgbuf_configure_addr_mode
;
1408 drvr
->proto
->delete_peer
= brcmf_msgbuf_delete_peer
;
1409 drvr
->proto
->add_tdls_peer
= brcmf_msgbuf_add_tdls_peer
;
1410 drvr
->proto
->pd
= msgbuf
;
1412 init_waitqueue_head(&msgbuf
->ioctl_resp_wait
);
1414 msgbuf
->commonrings
=
1415 (struct brcmf_commonring
**)if_msgbuf
->commonrings
;
1416 msgbuf
->flowrings
= (struct brcmf_commonring
**)if_msgbuf
->flowrings
;
1417 msgbuf
->nrof_flowrings
= if_msgbuf
->nrof_flowrings
;
1418 msgbuf
->flowring_dma_handle
= kzalloc(msgbuf
->nrof_flowrings
*
1419 sizeof(*msgbuf
->flowring_dma_handle
), GFP_KERNEL
);
1420 if (!msgbuf
->flowring_dma_handle
)
1423 msgbuf
->rx_dataoffset
= if_msgbuf
->rx_dataoffset
;
1424 msgbuf
->max_rxbufpost
= if_msgbuf
->max_rxbufpost
;
1426 msgbuf
->max_ioctlrespbuf
= BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST
;
1427 msgbuf
->max_eventbuf
= BRCMF_MSGBUF_MAX_EVENTBUF_POST
;
1429 msgbuf
->tx_pktids
= brcmf_msgbuf_init_pktids(NR_TX_PKTIDS
,
1431 if (!msgbuf
->tx_pktids
)
1433 msgbuf
->rx_pktids
= brcmf_msgbuf_init_pktids(NR_RX_PKTIDS
,
1435 if (!msgbuf
->rx_pktids
)
1438 msgbuf
->flow
= brcmf_flowring_attach(drvr
->bus_if
->dev
,
1439 if_msgbuf
->nrof_flowrings
);
1444 brcmf_dbg(MSGBUF
, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
1445 msgbuf
->max_rxbufpost
, msgbuf
->max_eventbuf
,
1446 msgbuf
->max_ioctlrespbuf
);
1449 brcmf_msgbuf_rxbuf_data_fill(msgbuf
);
1450 if (msgbuf
->max_rxbufpost
!= msgbuf
->rxbufpost
)
1455 } while (count
< 10);
1456 brcmf_msgbuf_rxbuf_event_post(msgbuf
);
1457 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf
);
1459 INIT_WORK(&msgbuf
->flowring_work
, brcmf_msgbuf_flowring_worker
);
1460 spin_lock_init(&msgbuf
->flowring_work_lock
);
1461 INIT_LIST_HEAD(&msgbuf
->work_queue
);
1467 kfree(msgbuf
->flow_map
);
1468 kfree(msgbuf
->txstatus_done_map
);
1469 brcmf_msgbuf_release_pktids(msgbuf
);
1470 kfree(msgbuf
->flowring_dma_handle
);
1471 if (msgbuf
->ioctbuf
)
1472 dma_free_coherent(drvr
->bus_if
->dev
,
1473 BRCMF_TX_IOCTL_MAX_MSG_SIZE
,
1475 msgbuf
->ioctbuf_handle
);
1482 void brcmf_proto_msgbuf_detach(struct brcmf_pub
*drvr
)
1484 struct brcmf_msgbuf
*msgbuf
;
1485 struct brcmf_msgbuf_work_item
*work
;
1487 brcmf_dbg(TRACE
, "Enter\n");
1488 if (drvr
->proto
->pd
) {
1489 msgbuf
= (struct brcmf_msgbuf
*)drvr
->proto
->pd
;
1490 cancel_work_sync(&msgbuf
->flowring_work
);
1491 while (!list_empty(&msgbuf
->work_queue
)) {
1492 work
= list_first_entry(&msgbuf
->work_queue
,
1493 struct brcmf_msgbuf_work_item
,
1495 list_del(&work
->queue
);
1498 kfree(msgbuf
->flow_map
);
1499 kfree(msgbuf
->txstatus_done_map
);
1500 if (msgbuf
->txflow_wq
)
1501 destroy_workqueue(msgbuf
->txflow_wq
);
1503 brcmf_flowring_detach(msgbuf
->flow
);
1504 dma_free_coherent(drvr
->bus_if
->dev
,
1505 BRCMF_TX_IOCTL_MAX_MSG_SIZE
,
1506 msgbuf
->ioctbuf
, msgbuf
->ioctbuf_handle
);
1507 brcmf_msgbuf_release_pktids(msgbuf
);
1508 kfree(msgbuf
->flowring_dma_handle
);
1510 drvr
->proto
->pd
= NULL
;