]>
Commit | Line | Data |
---|---|---|
1 | /* Copyright (c) 2014 Broadcom Corporation | |
2 | * | |
3 | * Permission to use, copy, modify, and/or distribute this software for any | |
4 | * purpose with or without fee is hereby granted, provided that the above | |
5 | * copyright notice and this permission notice appear in all copies. | |
6 | * | |
7 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
8 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
9 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY | |
10 | * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
11 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION | |
12 | * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN | |
13 | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
14 | */ | |
15 | ||
16 | /******************************************************************************* | |
17 | * Communicates with the dongle by using dcmd codes. | |
18 | * For certain dcmd codes, the dongle interprets string data from the host. | |
19 | ******************************************************************************/ | |
20 | ||
21 | #include <linux/types.h> | |
22 | #include <linux/netdevice.h> | |
23 | ||
24 | #include <brcmu_utils.h> | |
25 | #include <brcmu_wifi.h> | |
26 | ||
27 | #include "dhd.h" | |
28 | #include "dhd_dbg.h" | |
29 | #include "proto.h" | |
30 | #include "msgbuf.h" | |
31 | #include "commonring.h" | |
32 | #include "flowring.h" | |
33 | #include "dhd_bus.h" | |
34 | #include "tracepoint.h" | |
35 | ||
36 | ||
37 | #define MSGBUF_IOCTL_RESP_TIMEOUT 2000 | |
38 | ||
39 | #define MSGBUF_TYPE_GEN_STATUS 0x1 | |
40 | #define MSGBUF_TYPE_RING_STATUS 0x2 | |
41 | #define MSGBUF_TYPE_FLOW_RING_CREATE 0x3 | |
42 | #define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT 0x4 | |
43 | #define MSGBUF_TYPE_FLOW_RING_DELETE 0x5 | |
44 | #define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT 0x6 | |
45 | #define MSGBUF_TYPE_FLOW_RING_FLUSH 0x7 | |
46 | #define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT 0x8 | |
47 | #define MSGBUF_TYPE_IOCTLPTR_REQ 0x9 | |
48 | #define MSGBUF_TYPE_IOCTLPTR_REQ_ACK 0xA | |
49 | #define MSGBUF_TYPE_IOCTLRESP_BUF_POST 0xB | |
50 | #define MSGBUF_TYPE_IOCTL_CMPLT 0xC | |
51 | #define MSGBUF_TYPE_EVENT_BUF_POST 0xD | |
52 | #define MSGBUF_TYPE_WL_EVENT 0xE | |
53 | #define MSGBUF_TYPE_TX_POST 0xF | |
54 | #define MSGBUF_TYPE_TX_STATUS 0x10 | |
55 | #define MSGBUF_TYPE_RXBUF_POST 0x11 | |
56 | #define MSGBUF_TYPE_RX_CMPLT 0x12 | |
57 | #define MSGBUF_TYPE_LPBK_DMAXFER 0x13 | |
58 | #define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14 | |
59 | ||
60 | #define NR_TX_PKTIDS 2048 | |
61 | #define NR_RX_PKTIDS 1024 | |
62 | ||
63 | #define BRCMF_IOCTL_REQ_PKTID 0xFFFE | |
64 | ||
65 | #define BRCMF_MSGBUF_MAX_PKT_SIZE 2048 | |
66 | #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD 32 | |
67 | #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST 8 | |
68 | #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8 | |
69 | ||
70 | #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01 | |
71 | #define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5 | |
72 | ||
73 | #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 | |
74 | #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96 | |
75 | ||
76 | ||
/* The structures below are exchanged with the dongle over the msgbuf
 * rings; all multi-byte fields are little-endian (__le16/__le32), so the
 * layouts presumably must match the firmware's wire format exactly —
 * do not reorder or repack. NOTE(review): field meanings inferred from
 * usage in this file; confirm against firmware interface docs.
 */

/* Header carried at the start of every msgbuf message; request_id
 * correlates a request with its completion.
 */
struct msgbuf_common_hdr {
	u8 msgtype;
	u8 ifidx;
	u8 flags;
	u8 rsvd0;
	__le32 request_id;
};

/* 64-bit DMA address split into two little-endian 32-bit halves. */
struct msgbuf_buf_addr {
	__le32 low_addr;
	__le32 high_addr;
};

/* Host->dongle ioctl request (MSGBUF_TYPE_IOCTLPTR_REQ); the payload
 * lives in a separate host buffer pointed to by req_buf_addr.
 */
struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr msg;
	__le32 cmd;
	__le16 trans_id;
	__le16 input_buf_len;
	__le16 output_buf_len;
	__le16 rsvd0[3];
	struct msgbuf_buf_addr req_buf_addr;
	__le32 rsvd1[2];
};

/* Host->dongle tx descriptor (MSGBUF_TYPE_TX_POST); the ethernet header
 * is copied inline in txhdr, the remaining frame is DMA-mapped.
 */
struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr msg;
	u8 txhdr[ETH_HLEN];
	u8 flags;
	u8 seg_cnt;
	struct msgbuf_buf_addr metadata_buf_addr;
	struct msgbuf_buf_addr data_buf_addr;
	__le16 metadata_buf_len;
	__le16 data_len;
	__le32 rsvd0;
};

/* Host->dongle rx buffer donation (MSGBUF_TYPE_RXBUF_POST). */
struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr msg;
	__le16 metadata_buf_len;
	__le16 data_buf_len;
	__le32 rsvd0;
	struct msgbuf_buf_addr metadata_buf_addr;
	struct msgbuf_buf_addr data_buf_addr;
};

/* Host->dongle buffer donation for ioctl responses or firmware events
 * (MSGBUF_TYPE_IOCTLRESP_BUF_POST / MSGBUF_TYPE_EVENT_BUF_POST).
 */
struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr msg;
	__le16 host_buf_len;
	__le16 rsvd0[3];
	struct msgbuf_buf_addr host_buf_addr;
	__le32 rsvd1[4];
};

/* Status/ring-id pair embedded in every dongle->host completion. */
struct msgbuf_completion_hdr {
	__le16 status;
	__le16 flow_ring_id;
};

/* Dongle->host firmware event notification (MSGBUF_TYPE_WL_EVENT). */
struct msgbuf_rx_event {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 event_data_len;
	__le16 seqnum;
	__le16 rsvd0[4];
};

/* Dongle->host ioctl completion (MSGBUF_TYPE_IOCTL_CMPLT). */
struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 resp_len;
	__le16 trans_id;
	__le32 cmd;
	__le32 rsvd0;
};

/* Dongle->host tx completion (MSGBUF_TYPE_TX_STATUS). */
struct msgbuf_tx_status {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 metadata_len;
	__le16 tx_status;
};

/* Dongle->host rx completion (MSGBUF_TYPE_RX_CMPLT). */
struct msgbuf_rx_complete {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 metadata_len;
	__le16 data_len;
	__le16 data_offset;
	__le16 flags;
	__le32 rx_status_0;
	__le32 rx_status_1;
	__le32 rsvd0;
};

/* Host->dongle request to create a tx flowring
 * (MSGBUF_TYPE_FLOW_RING_CREATE); the ring memory itself is a host DMA
 * buffer described by flow_ring_addr/max_items/len_item.
 */
struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr msg;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u8 tid;
	u8 if_flags;
	__le16 flow_ring_id;
	u8 tc;
	u8 priority;
	__le16 int_vector;
	__le16 max_items;
	__le16 len_item;
	struct msgbuf_buf_addr flow_ring_addr;
};

/* Host->dongle request to delete a tx flowring
 * (MSGBUF_TYPE_FLOW_RING_DELETE).
 */
struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr msg;
	__le16 flow_ring_id;
	__le16 reason;
	__le32 rsvd0[7];
};

/* Dongle->host flowring create completion. */
struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le32 rsvd0[3];
};

/* Dongle->host flowring delete completion. */
struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le32 rsvd0[3];
};

/* Dongle->host flowring flush completion. */
struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le32 rsvd0[3];
};
210 | ||
/* Per-device state of the msgbuf protocol layer. */
struct brcmf_msgbuf {
	struct brcmf_pub *drvr;		/* back-pointer to core driver state */

	struct brcmf_commonring **commonrings;	/* fixed control/rx/tx rings */
	struct brcmf_commonring **flowrings;	/* per-flow tx rings */
	dma_addr_t *flowring_dma_handle;	/* DMA handle per flowring buffer */
	u16 nrof_flowrings;

	/* rx buffer posting bookkeeping */
	u16 rx_dataoffset;
	u32 max_rxbufpost;	/* target number of posted rx buffers */
	u16 rx_metadata_offset;
	u32 rxbufpost;		/* currently posted rx buffers */

	/* ioctl-response / event buffer posting bookkeeping */
	u32 max_ioctlrespbuf;
	u32 cur_ioctlrespbuf;
	u32 max_eventbuf;
	u32 cur_eventbuf;

	/* single pre-allocated DMA buffer for outgoing ioctl payloads */
	void *ioctbuf;
	dma_addr_t ioctbuf_handle;
	u32 ioctbuf_phys_hi;
	u32 ioctbuf_phys_lo;
	/* last ioctl completion, filled by brcmf_msgbuf_process_ioctl_complete */
	u32 ioctl_resp_status;
	u32 ioctl_resp_ret_len;
	u32 ioctl_resp_pktid;

	u16 data_seq_no;
	u16 ioctl_seq_no;
	u32 reqid;		/* transaction id counter for ioctl requests */
	wait_queue_head_t ioctl_resp_wait;
	bool ctl_completed;	/* set when an ioctl completion arrived */

	struct brcmf_msgbuf_pktids *tx_pktids;	/* in-flight tx skb table */
	struct brcmf_msgbuf_pktids *rx_pktids;	/* posted rx skb table */
	struct brcmf_flowring *flow;

	/* deferred tx: worker drains flowrings flagged in flow_map */
	struct workqueue_struct *txflow_wq;
	struct work_struct txflow_work;
	unsigned long *flow_map;
	unsigned long *txstatus_done_map;
};
252 | ||
/* One slot of the packet-id table: an skb handed to the dongle together
 * with its DMA mapping. 'allocated' doubles as the in-use flag, claimed
 * via atomic_cmpxchg in brcmf_msgbuf_alloc_pktid.
 */
struct brcmf_msgbuf_pktid {
	atomic_t allocated;
	u16 data_offset;
	struct sk_buff *skb;
	dma_addr_t physaddr;
};

/* Fixed-size table mapping packet ids (array indices) to in-flight skbs;
 * last_allocated_idx makes allocation scan round-robin.
 */
struct brcmf_msgbuf_pktids {
	u32 array_size;
	u32 last_allocated_idx;
	enum dma_data_direction direction;
	struct brcmf_msgbuf_pktid *array;
};
266 | ||
267 | ||
268 | /* dma flushing needs implementation for mips and arm platforms. Should | |
269 | * be put in util. Note, this is not real flushing. It is virtual non | |
270 | * cached memory. Only write buffers should have to be drained. Though | |
271 | * this may be different depending on platform...... | |
272 | */ | |
273 | #define brcmf_dma_flush(addr, len) | |
274 | #define brcmf_dma_invalidate_cache(addr, len) | |
275 | ||
276 | ||
277 | static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf); | |
278 | ||
279 | ||
280 | static struct brcmf_msgbuf_pktids * | |
281 | brcmf_msgbuf_init_pktids(u32 nr_array_entries, | |
282 | enum dma_data_direction direction) | |
283 | { | |
284 | struct brcmf_msgbuf_pktid *array; | |
285 | struct brcmf_msgbuf_pktids *pktids; | |
286 | ||
287 | array = kcalloc(nr_array_entries, sizeof(*array), GFP_ATOMIC); | |
288 | if (!array) | |
289 | return NULL; | |
290 | ||
291 | pktids = kzalloc(sizeof(*pktids), GFP_ATOMIC); | |
292 | if (!pktids) { | |
293 | kfree(array); | |
294 | return NULL; | |
295 | } | |
296 | pktids->array = array; | |
297 | pktids->array_size = nr_array_entries; | |
298 | ||
299 | return pktids; | |
300 | } | |
301 | ||
302 | ||
303 | static int | |
304 | brcmf_msgbuf_alloc_pktid(struct device *dev, | |
305 | struct brcmf_msgbuf_pktids *pktids, | |
306 | struct sk_buff *skb, u16 data_offset, | |
307 | dma_addr_t *physaddr, u32 *idx) | |
308 | { | |
309 | struct brcmf_msgbuf_pktid *array; | |
310 | u32 count; | |
311 | ||
312 | array = pktids->array; | |
313 | ||
314 | *physaddr = dma_map_single(dev, skb->data + data_offset, | |
315 | skb->len - data_offset, pktids->direction); | |
316 | ||
317 | if (dma_mapping_error(dev, *physaddr)) { | |
318 | brcmf_err("dma_map_single failed !!\n"); | |
319 | return -ENOMEM; | |
320 | } | |
321 | ||
322 | *idx = pktids->last_allocated_idx; | |
323 | ||
324 | count = 0; | |
325 | do { | |
326 | (*idx)++; | |
327 | if (*idx == pktids->array_size) | |
328 | *idx = 0; | |
329 | if (array[*idx].allocated.counter == 0) | |
330 | if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0) | |
331 | break; | |
332 | count++; | |
333 | } while (count < pktids->array_size); | |
334 | ||
335 | if (count == pktids->array_size) | |
336 | return -ENOMEM; | |
337 | ||
338 | array[*idx].data_offset = data_offset; | |
339 | array[*idx].physaddr = *physaddr; | |
340 | array[*idx].skb = skb; | |
341 | ||
342 | pktids->last_allocated_idx = *idx; | |
343 | ||
344 | return 0; | |
345 | } | |
346 | ||
347 | ||
/* Look up packet id @idx, release its slot and DMA mapping, and return
 * the associated skb; NULL when @idx is out of range or not in use.
 */
static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	/* ids come from the dongle; validate before indexing */
	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		/* NOTE(review): raw .counter access rather than
		 * atomic_read()/atomic_set(); presumably alloc and release
		 * for a given id never race — TODO confirm.
		 */
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}
374 | ||
375 | ||
376 | static void | |
377 | brcmf_msgbuf_release_array(struct device *dev, | |
378 | struct brcmf_msgbuf_pktids *pktids) | |
379 | { | |
380 | struct brcmf_msgbuf_pktid *array; | |
381 | struct brcmf_msgbuf_pktid *pktid; | |
382 | u32 count; | |
383 | ||
384 | array = pktids->array; | |
385 | count = 0; | |
386 | do { | |
387 | if (array[count].allocated.counter) { | |
388 | pktid = &array[count]; | |
389 | dma_unmap_single(dev, pktid->physaddr, | |
390 | pktid->skb->len - pktid->data_offset, | |
391 | pktids->direction); | |
392 | brcmu_pkt_buf_free_skb(pktid->skb); | |
393 | } | |
394 | count++; | |
395 | } while (count < pktids->array_size); | |
396 | ||
397 | kfree(array); | |
398 | kfree(pktids); | |
399 | } | |
400 | ||
401 | ||
402 | static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf) | |
403 | { | |
404 | if (msgbuf->rx_pktids) | |
405 | brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev, | |
406 | msgbuf->rx_pktids); | |
407 | if (msgbuf->tx_pktids) | |
408 | brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev, | |
409 | msgbuf->tx_pktids); | |
410 | } | |
411 | ||
412 | ||
413 | static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx, | |
414 | uint cmd, void *buf, uint len) | |
415 | { | |
416 | struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; | |
417 | struct brcmf_commonring *commonring; | |
418 | struct msgbuf_ioctl_req_hdr *request; | |
419 | u16 buf_len; | |
420 | void *ret_ptr; | |
421 | int err; | |
422 | ||
423 | commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; | |
424 | brcmf_commonring_lock(commonring); | |
425 | ret_ptr = brcmf_commonring_reserve_for_write(commonring); | |
426 | if (!ret_ptr) { | |
427 | brcmf_err("Failed to reserve space in commonring\n"); | |
428 | brcmf_commonring_unlock(commonring); | |
429 | return -ENOMEM; | |
430 | } | |
431 | ||
432 | msgbuf->reqid++; | |
433 | ||
434 | request = (struct msgbuf_ioctl_req_hdr *)ret_ptr; | |
435 | request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ; | |
436 | request->msg.ifidx = (u8)ifidx; | |
437 | request->msg.flags = 0; | |
438 | request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID); | |
439 | request->cmd = cpu_to_le32(cmd); | |
440 | request->output_buf_len = cpu_to_le16(len); | |
441 | request->trans_id = cpu_to_le16(msgbuf->reqid); | |
442 | ||
443 | buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE); | |
444 | request->input_buf_len = cpu_to_le16(buf_len); | |
445 | request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi); | |
446 | request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo); | |
447 | if (buf) | |
448 | memcpy(msgbuf->ioctbuf, buf, buf_len); | |
449 | else | |
450 | memset(msgbuf->ioctbuf, 0, buf_len); | |
451 | brcmf_dma_flush(ioctl_buf, buf_len); | |
452 | ||
453 | err = brcmf_commonring_write_complete(commonring); | |
454 | brcmf_commonring_unlock(commonring); | |
455 | ||
456 | return err; | |
457 | } | |
458 | ||
459 | ||
/* Block until an ioctl completion sets ctl_completed, or until
 * MSGBUF_IOCTL_RESP_TIMEOUT ms elapse. Returns 0 on timeout, non-zero
 * (remaining jiffies) otherwise.
 */
static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  msecs_to_jiffies(MSGBUF_IOCTL_RESP_TIMEOUT));
}
466 | ||
467 | ||
468 | static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf) | |
469 | { | |
470 | if (waitqueue_active(&msgbuf->ioctl_resp_wait)) { | |
471 | msgbuf->ctl_completed = true; | |
472 | wake_up(&msgbuf->ioctl_resp_wait); | |
473 | } | |
474 | } | |
475 | ||
476 | ||
477 | static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, | |
478 | uint cmd, void *buf, uint len) | |
479 | { | |
480 | struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; | |
481 | struct sk_buff *skb = NULL; | |
482 | int timeout; | |
483 | int err; | |
484 | ||
485 | brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len); | |
486 | msgbuf->ctl_completed = false; | |
487 | err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len); | |
488 | if (err) | |
489 | return err; | |
490 | ||
491 | timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf); | |
492 | if (!timeout) { | |
493 | brcmf_err("Timeout on response for query command\n"); | |
494 | return -EIO; | |
495 | } | |
496 | ||
497 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | |
498 | msgbuf->rx_pktids, | |
499 | msgbuf->ioctl_resp_pktid); | |
500 | if (msgbuf->ioctl_resp_ret_len != 0) { | |
501 | if (!skb) { | |
502 | brcmf_err("Invalid packet id idx recv'd %d\n", | |
503 | msgbuf->ioctl_resp_pktid); | |
504 | return -EBADF; | |
505 | } | |
506 | memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? | |
507 | len : msgbuf->ioctl_resp_ret_len); | |
508 | } | |
509 | if (skb) | |
510 | brcmu_pkt_buf_free_skb(skb); | |
511 | ||
512 | return msgbuf->ioctl_resp_status; | |
513 | } | |
514 | ||
515 | ||
/* "Set" dcmds use the same request/response path as queries; the
 * firmware simply returns a status with no payload of interest.
 */
static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len);
}
521 | ||
522 | ||
/* msgbuf frames carry no protocol header to strip; this proto op is a
 * stub that always reports -ENODEV.
 */
static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				u8 *ifidx, struct sk_buff *skb)
{
	return -ENODEV;
}
528 | ||
529 | ||
530 | static void | |
531 | brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid) | |
532 | { | |
533 | u32 dma_sz; | |
534 | void *dma_buf; | |
535 | ||
536 | brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid); | |
537 | ||
538 | dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE; | |
539 | dma_buf = msgbuf->flowrings[flowid]->buf_addr; | |
540 | dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf, | |
541 | msgbuf->flowring_dma_handle[flowid]); | |
542 | ||
543 | brcmf_flowring_delete(msgbuf->flow, flowid); | |
544 | } | |
545 | ||
546 | ||
/* Create a tx flowring for the destination/priority of @skb: allocate
 * the flowring id, its coherent DMA ring buffer, and send a
 * FLOW_RING_CREATE request to the dongle.
 *
 * Returns the new flowid or BRCMF_FLOWRING_INVALID_ID on failure; all
 * partially-created state is rolled back on each error path.
 */
static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
					struct sk_buff *skb)
{
	struct msgbuf_tx_flowring_create_req *create;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	long long address;
	int err;

	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
				       skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID)
		return flowid;

	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;

	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_ATOMIC);
	if (!dma_buf) {
		brcmf_err("dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	/* the create request itself goes over the control submit ring */
	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	/* on the wire, flowring ids are offset past the common msgrings */
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
	memcpy(create->sa, eh->h_source, ETH_ALEN);
	memcpy(create->da, eh->h_dest, ETH_ALEN);
	/* NOTE(review): (long long)(long) would truncate a 64-bit
	 * dma_addr_t on a 32-bit kernel — presumably fine for supported
	 * platforms, but confirm.
	 */
	address = (long long)(long)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, eh->h_dest, create->tid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		brcmf_err("Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}
618 | ||
619 | ||
620 | static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid) | |
621 | { | |
622 | struct brcmf_flowring *flow = msgbuf->flow; | |
623 | struct brcmf_commonring *commonring; | |
624 | void *ret_ptr; | |
625 | u32 count; | |
626 | struct sk_buff *skb; | |
627 | dma_addr_t physaddr; | |
628 | u32 pktid; | |
629 | struct msgbuf_tx_msghdr *tx_msghdr; | |
630 | long long address; | |
631 | ||
632 | commonring = msgbuf->flowrings[flowid]; | |
633 | if (!brcmf_commonring_write_available(commonring)) | |
634 | return; | |
635 | ||
636 | brcmf_commonring_lock(commonring); | |
637 | ||
638 | count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1; | |
639 | while (brcmf_flowring_qlen(flow, flowid)) { | |
640 | skb = brcmf_flowring_dequeue(flow, flowid); | |
641 | if (skb == NULL) { | |
642 | brcmf_err("No SKB, but qlen %d\n", | |
643 | brcmf_flowring_qlen(flow, flowid)); | |
644 | break; | |
645 | } | |
646 | skb_orphan(skb); | |
647 | if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, | |
648 | msgbuf->tx_pktids, skb, ETH_HLEN, | |
649 | &physaddr, &pktid)) { | |
650 | brcmf_flowring_reinsert(flow, flowid, skb); | |
651 | brcmf_err("No PKTID available !!\n"); | |
652 | break; | |
653 | } | |
654 | ret_ptr = brcmf_commonring_reserve_for_write(commonring); | |
655 | if (!ret_ptr) { | |
656 | brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | |
657 | msgbuf->tx_pktids, pktid); | |
658 | brcmf_flowring_reinsert(flow, flowid, skb); | |
659 | break; | |
660 | } | |
661 | count++; | |
662 | ||
663 | tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr; | |
664 | ||
665 | tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST; | |
666 | tx_msghdr->msg.request_id = cpu_to_le32(pktid); | |
667 | tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid); | |
668 | tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3; | |
669 | tx_msghdr->flags |= (skb->priority & 0x07) << | |
670 | BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT; | |
671 | tx_msghdr->seg_cnt = 1; | |
672 | memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN); | |
673 | tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN); | |
674 | address = (long long)(long)physaddr; | |
675 | tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32); | |
676 | tx_msghdr->data_buf_addr.low_addr = | |
677 | cpu_to_le32(address & 0xffffffff); | |
678 | tx_msghdr->metadata_buf_len = 0; | |
679 | tx_msghdr->metadata_buf_addr.high_addr = 0; | |
680 | tx_msghdr->metadata_buf_addr.low_addr = 0; | |
681 | if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) { | |
682 | brcmf_commonring_write_complete(commonring); | |
683 | count = 0; | |
684 | } | |
685 | } | |
686 | if (count) | |
687 | brcmf_commonring_write_complete(commonring); | |
688 | brcmf_commonring_unlock(commonring); | |
689 | } | |
690 | ||
691 | ||
692 | static void brcmf_msgbuf_txflow_worker(struct work_struct *worker) | |
693 | { | |
694 | struct brcmf_msgbuf *msgbuf; | |
695 | u32 flowid; | |
696 | ||
697 | msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work); | |
698 | for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) { | |
699 | clear_bit(flowid, msgbuf->flow_map); | |
700 | brcmf_msgbuf_txflow(msgbuf, flowid); | |
701 | } | |
702 | } | |
703 | ||
704 | ||
/* Flag @flowid for transmission and kick the txflow worker. Always
 * returns 0.
 */
static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid)
{
	set_bit(flowid, msgbuf->flow_map);
	queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}
712 | ||
713 | ||
714 | static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx, | |
715 | u8 offset, struct sk_buff *skb) | |
716 | { | |
717 | struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; | |
718 | struct brcmf_flowring *flow = msgbuf->flow; | |
719 | struct ethhdr *eh = (struct ethhdr *)(skb->data); | |
720 | u32 flowid; | |
721 | ||
722 | flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx); | |
723 | if (flowid == BRCMF_FLOWRING_INVALID_ID) { | |
724 | flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb); | |
725 | if (flowid == BRCMF_FLOWRING_INVALID_ID) | |
726 | return -ENOMEM; | |
727 | } | |
728 | brcmf_flowring_enqueue(flow, flowid, skb); | |
729 | brcmf_msgbuf_schedule_txdata(msgbuf, flowid); | |
730 | ||
731 | return 0; | |
732 | } | |
733 | ||
734 | ||
/* proto op: forward the per-interface address mode to the flowring
 * module.
 */
static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}
743 | ||
744 | ||
/* proto op: drop flowring state for @peer on interface @ifidx. */
static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}
752 | ||
753 | ||
/* proto op: register @peer as a TDLS peer with the flowring module. */
static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}
761 | ||
762 | ||
/* Handle an MSGBUF_TYPE_IOCTL_CMPLT message: record status, response
 * length and packet id for the waiting thread, wake it, and replenish
 * the ioctl-response buffer pool.
 */
static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	msgbuf->ioctl_resp_status = le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	/* one posted response buffer was consumed; top the pool back up */
	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}
780 | ||
781 | ||
/* Handle an MSGBUF_TYPE_TX_STATUS message: reclaim the tx skb by its
 * packet id, mark the flowring as having completed work, and finalize
 * the transmit.
 */
static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id);
	/* wire flowring ids are offset past the common msgrings */
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb) {
		brcmf_err("Invalid packet id idx recv'd %d\n", idx);
		return;
	}

	set_bit(flowid, msgbuf->txstatus_done_map);

	brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
}
805 | ||
806 | ||
/* Post up to @count fresh rx buffers to the rxpost submit ring.
 *
 * Each buffer is a BRCMF_MSGBUF_MAX_PKT_SIZE skb, DMA-mapped and
 * tracked in the rx packet-id table. When rx metadata is configured,
 * the head of the same buffer doubles as the metadata area.
 *
 * Returns the number of buffers actually posted (may be less than
 * @count on ring-space, skb or pktid exhaustion).
 */
static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	long long address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			/* give back the ring slots we won't fill */
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			/* metadata occupies the first bytes of the buffer;
			 * the data area starts right after it
			 */
			address = (long long)(long)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (long long)(long)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}
882 | ||
883 | ||
884 | static void | |
885 | brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf) | |
886 | { | |
887 | u32 fillbufs; | |
888 | u32 retcount; | |
889 | ||
890 | fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost; | |
891 | ||
892 | while (fillbufs) { | |
893 | retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs); | |
894 | if (!retcount) | |
895 | break; | |
896 | msgbuf->rxbufpost += retcount; | |
897 | fillbufs -= retcount; | |
898 | } | |
899 | } | |
900 | ||
901 | ||
/* Account for @rxcnt consumed rx buffers and refill once the posted
 * count drops BRCMF_MSGBUF_RXBUFPOST_THRESHOLD below the target.
 */
static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
	msgbuf->rxbufpost -= rxcnt;
	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}
910 | ||
911 | ||
/* Post up to @count control-path buffers (event buffers when @event_buf,
 * otherwise ioctl-response buffers) on the control submit ring.
 *
 * Same buffer/pktid handling as brcmf_msgbuf_rxbuf_data_post(), but the
 * control ring is shared, so posting happens under the ring lock.
 *
 * Returns the number of buffers actually posted.
 */
static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	long long address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			/* give back the ring slots we won't fill */
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (long long)(long)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}
983 | ||
984 | ||
985 | static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf) | |
986 | { | |
987 | u32 count; | |
988 | ||
989 | count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf; | |
990 | count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count); | |
991 | msgbuf->cur_ioctlrespbuf += count; | |
992 | } | |
993 | ||
994 | ||
995 | static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf) | |
996 | { | |
997 | u32 count; | |
998 | ||
999 | count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf; | |
1000 | count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count); | |
1001 | msgbuf->cur_eventbuf += count; | |
1002 | } | |
1003 | ||
1004 | ||
1005 | static void | |
1006 | brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb, | |
1007 | u8 ifidx) | |
1008 | { | |
1009 | struct brcmf_if *ifp; | |
1010 | ||
1011 | ifp = msgbuf->drvr->iflist[ifidx]; | |
1012 | if (!ifp || !ifp->ndev) { | |
1013 | brcmu_pkt_buf_free_skb(skb); | |
1014 | return; | |
1015 | } | |
1016 | brcmf_netif_rx(ifp, skb); | |
1017 | } | |
1018 | ||
1019 | ||
1020 | static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) | |
1021 | { | |
1022 | struct msgbuf_rx_event *event; | |
1023 | u32 idx; | |
1024 | u16 buflen; | |
1025 | struct sk_buff *skb; | |
1026 | ||
1027 | event = (struct msgbuf_rx_event *)buf; | |
1028 | idx = le32_to_cpu(event->msg.request_id); | |
1029 | buflen = le16_to_cpu(event->event_data_len); | |
1030 | ||
1031 | if (msgbuf->cur_eventbuf) | |
1032 | msgbuf->cur_eventbuf--; | |
1033 | brcmf_msgbuf_rxbuf_event_post(msgbuf); | |
1034 | ||
1035 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | |
1036 | msgbuf->rx_pktids, idx); | |
1037 | if (!skb) | |
1038 | return; | |
1039 | ||
1040 | if (msgbuf->rx_dataoffset) | |
1041 | skb_pull(skb, msgbuf->rx_dataoffset); | |
1042 | ||
1043 | skb_trim(skb, buflen); | |
1044 | ||
1045 | brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx); | |
1046 | } | |
1047 | ||
1048 | ||
1049 | static void | |
1050 | brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) | |
1051 | { | |
1052 | struct msgbuf_rx_complete *rx_complete; | |
1053 | struct sk_buff *skb; | |
1054 | u16 data_offset; | |
1055 | u16 buflen; | |
1056 | u32 idx; | |
1057 | ||
1058 | brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1); | |
1059 | ||
1060 | rx_complete = (struct msgbuf_rx_complete *)buf; | |
1061 | data_offset = le16_to_cpu(rx_complete->data_offset); | |
1062 | buflen = le16_to_cpu(rx_complete->data_len); | |
1063 | idx = le32_to_cpu(rx_complete->msg.request_id); | |
1064 | ||
1065 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | |
1066 | msgbuf->rx_pktids, idx); | |
1067 | ||
1068 | if (data_offset) | |
1069 | skb_pull(skb, data_offset); | |
1070 | else if (msgbuf->rx_dataoffset) | |
1071 | skb_pull(skb, msgbuf->rx_dataoffset); | |
1072 | ||
1073 | skb_trim(skb, buflen); | |
1074 | ||
1075 | brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx); | |
1076 | } | |
1077 | ||
1078 | ||
1079 | static void | |
1080 | brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf, | |
1081 | void *buf) | |
1082 | { | |
1083 | struct msgbuf_flowring_create_resp *flowring_create_resp; | |
1084 | u16 status; | |
1085 | u16 flowid; | |
1086 | ||
1087 | flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf; | |
1088 | ||
1089 | flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id); | |
1090 | flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; | |
1091 | status = le16_to_cpu(flowring_create_resp->compl_hdr.status); | |
1092 | ||
1093 | if (status) { | |
1094 | brcmf_err("Flowring creation failed, code %d\n", status); | |
1095 | brcmf_msgbuf_remove_flowring(msgbuf, flowid); | |
1096 | return; | |
1097 | } | |
1098 | brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid, | |
1099 | status); | |
1100 | ||
1101 | brcmf_flowring_open(msgbuf->flow, flowid); | |
1102 | ||
1103 | brcmf_msgbuf_schedule_txdata(msgbuf, flowid); | |
1104 | } | |
1105 | ||
1106 | ||
1107 | static void | |
1108 | brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf, | |
1109 | void *buf) | |
1110 | { | |
1111 | struct msgbuf_flowring_delete_resp *flowring_delete_resp; | |
1112 | u16 status; | |
1113 | u16 flowid; | |
1114 | ||
1115 | flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf; | |
1116 | ||
1117 | flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id); | |
1118 | flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; | |
1119 | status = le16_to_cpu(flowring_delete_resp->compl_hdr.status); | |
1120 | ||
1121 | if (status) { | |
1122 | brcmf_err("Flowring deletion failed, code %d\n", status); | |
1123 | brcmf_flowring_delete(msgbuf->flow, flowid); | |
1124 | return; | |
1125 | } | |
1126 | brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid, | |
1127 | status); | |
1128 | ||
1129 | brcmf_msgbuf_remove_flowring(msgbuf, flowid); | |
1130 | } | |
1131 | ||
1132 | ||
/* Dispatch a single device-to-host work item to its handler based on the
 * msgtype field of the common message header. Unknown types are logged
 * and skipped; the caller still advances past the item.
 */
static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		/* ack only; completion arrives later as IOCTL_CMPLT */
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}
1171 | ||
1172 | ||
1173 | static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf, | |
1174 | struct brcmf_commonring *commonring) | |
1175 | { | |
1176 | void *buf; | |
1177 | u16 count; | |
1178 | ||
1179 | again: | |
1180 | buf = brcmf_commonring_get_read_ptr(commonring, &count); | |
1181 | if (buf == NULL) | |
1182 | return; | |
1183 | ||
1184 | while (count) { | |
1185 | brcmf_msgbuf_process_msgtype(msgbuf, | |
1186 | buf + msgbuf->rx_dataoffset); | |
1187 | buf += brcmf_commonring_len_item(commonring); | |
1188 | count--; | |
1189 | } | |
1190 | brcmf_commonring_read_complete(commonring); | |
1191 | ||
1192 | if (commonring->r_ptr == 0) | |
1193 | goto again; | |
1194 | } | |
1195 | ||
1196 | ||
1197 | int brcmf_proto_msgbuf_rx_trigger(struct device *dev) | |
1198 | { | |
1199 | struct brcmf_bus *bus_if = dev_get_drvdata(dev); | |
1200 | struct brcmf_pub *drvr = bus_if->drvr; | |
1201 | struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; | |
1202 | void *buf; | |
1203 | u32 flowid; | |
1204 | ||
1205 | buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; | |
1206 | brcmf_msgbuf_process_rx(msgbuf, buf); | |
1207 | buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]; | |
1208 | brcmf_msgbuf_process_rx(msgbuf, buf); | |
1209 | buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]; | |
1210 | brcmf_msgbuf_process_rx(msgbuf, buf); | |
1211 | ||
1212 | for_each_set_bit(flowid, msgbuf->txstatus_done_map, | |
1213 | msgbuf->nrof_flowrings) { | |
1214 | clear_bit(flowid, msgbuf->txstatus_done_map); | |
1215 | if (brcmf_flowring_qlen(msgbuf->flow, flowid)) | |
1216 | brcmf_msgbuf_schedule_txdata(msgbuf, flowid); | |
1217 | } | |
1218 | ||
1219 | return 0; | |
1220 | } | |
1221 | ||
1222 | ||
/* Request deletion of a flowring in the firmware by submitting a
 * MSGBUF_TYPE_FLOW_RING_DELETE message on the control ring. If the
 * request cannot be queued or submitted, the host-side flowring state is
 * removed immediately instead of waiting for the (never-coming)
 * delete-complete response.
 */
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u8 ifidx;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		/* no ring space: firmware won't learn of the deletion, so
		 * clean up locally right away
		 */
		brcmf_err("FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	/* firmware ring ids are offset past the common rings */
	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_NROF_H2D_COMMON_MSGRINGS);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		/* submit failed: same local-cleanup fallback as above */
		brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}
1264 | ||
1265 | ||
1266 | int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) | |
1267 | { | |
1268 | struct brcmf_bus_msgbuf *if_msgbuf; | |
1269 | struct brcmf_msgbuf *msgbuf; | |
1270 | long long address; | |
1271 | u32 count; | |
1272 | ||
1273 | if_msgbuf = drvr->bus_if->msgbuf; | |
1274 | msgbuf = kzalloc(sizeof(*msgbuf), GFP_ATOMIC); | |
1275 | if (!msgbuf) | |
1276 | goto fail; | |
1277 | ||
1278 | msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow"); | |
1279 | if (msgbuf->txflow_wq == NULL) { | |
1280 | brcmf_err("workqueue creation failed\n"); | |
1281 | goto fail; | |
1282 | } | |
1283 | INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker); | |
1284 | count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings); | |
1285 | msgbuf->flow_map = kzalloc(count, GFP_ATOMIC); | |
1286 | if (!msgbuf->flow_map) | |
1287 | goto fail; | |
1288 | ||
1289 | msgbuf->txstatus_done_map = kzalloc(count, GFP_ATOMIC); | |
1290 | if (!msgbuf->txstatus_done_map) | |
1291 | goto fail; | |
1292 | ||
1293 | msgbuf->drvr = drvr; | |
1294 | msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev, | |
1295 | BRCMF_TX_IOCTL_MAX_MSG_SIZE, | |
1296 | &msgbuf->ioctbuf_handle, | |
1297 | GFP_ATOMIC); | |
1298 | if (!msgbuf->ioctbuf) | |
1299 | goto fail; | |
1300 | address = (long long)(long)msgbuf->ioctbuf_handle; | |
1301 | msgbuf->ioctbuf_phys_hi = address >> 32; | |
1302 | msgbuf->ioctbuf_phys_lo = address & 0xffffffff; | |
1303 | ||
1304 | drvr->proto->hdrpull = brcmf_msgbuf_hdrpull; | |
1305 | drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd; | |
1306 | drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd; | |
1307 | drvr->proto->txdata = brcmf_msgbuf_txdata; | |
1308 | drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode; | |
1309 | drvr->proto->delete_peer = brcmf_msgbuf_delete_peer; | |
1310 | drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer; | |
1311 | drvr->proto->pd = msgbuf; | |
1312 | ||
1313 | init_waitqueue_head(&msgbuf->ioctl_resp_wait); | |
1314 | ||
1315 | msgbuf->commonrings = | |
1316 | (struct brcmf_commonring **)if_msgbuf->commonrings; | |
1317 | msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings; | |
1318 | msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings; | |
1319 | msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings * | |
1320 | sizeof(*msgbuf->flowring_dma_handle), GFP_ATOMIC); | |
1321 | if (!msgbuf->flowring_dma_handle) | |
1322 | goto fail; | |
1323 | ||
1324 | msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset; | |
1325 | msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost; | |
1326 | ||
1327 | msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST; | |
1328 | msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST; | |
1329 | ||
1330 | msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS, | |
1331 | DMA_TO_DEVICE); | |
1332 | if (!msgbuf->tx_pktids) | |
1333 | goto fail; | |
1334 | msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS, | |
1335 | DMA_FROM_DEVICE); | |
1336 | if (!msgbuf->rx_pktids) | |
1337 | goto fail; | |
1338 | ||
1339 | msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev, | |
1340 | if_msgbuf->nrof_flowrings); | |
1341 | if (!msgbuf->flow) | |
1342 | goto fail; | |
1343 | ||
1344 | ||
1345 | brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n", | |
1346 | msgbuf->max_rxbufpost, msgbuf->max_eventbuf, | |
1347 | msgbuf->max_ioctlrespbuf); | |
1348 | count = 0; | |
1349 | do { | |
1350 | brcmf_msgbuf_rxbuf_data_fill(msgbuf); | |
1351 | if (msgbuf->max_rxbufpost != msgbuf->rxbufpost) | |
1352 | msleep(10); | |
1353 | else | |
1354 | break; | |
1355 | count++; | |
1356 | } while (count < 10); | |
1357 | brcmf_msgbuf_rxbuf_event_post(msgbuf); | |
1358 | brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf); | |
1359 | ||
1360 | return 0; | |
1361 | ||
1362 | fail: | |
1363 | if (msgbuf) { | |
1364 | kfree(msgbuf->flow_map); | |
1365 | kfree(msgbuf->txstatus_done_map); | |
1366 | brcmf_msgbuf_release_pktids(msgbuf); | |
1367 | kfree(msgbuf->flowring_dma_handle); | |
1368 | if (msgbuf->ioctbuf) | |
1369 | dma_free_coherent(drvr->bus_if->dev, | |
1370 | BRCMF_TX_IOCTL_MAX_MSG_SIZE, | |
1371 | msgbuf->ioctbuf, | |
1372 | msgbuf->ioctbuf_handle); | |
1373 | kfree(msgbuf); | |
1374 | } | |
1375 | return -ENOMEM; | |
1376 | } | |
1377 | ||
1378 | ||
1379 | void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) | |
1380 | { | |
1381 | struct brcmf_msgbuf *msgbuf; | |
1382 | ||
1383 | brcmf_dbg(TRACE, "Enter\n"); | |
1384 | if (drvr->proto->pd) { | |
1385 | msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; | |
1386 | ||
1387 | kfree(msgbuf->flow_map); | |
1388 | kfree(msgbuf->txstatus_done_map); | |
1389 | if (msgbuf->txflow_wq) | |
1390 | destroy_workqueue(msgbuf->txflow_wq); | |
1391 | ||
1392 | brcmf_flowring_detach(msgbuf->flow); | |
1393 | dma_free_coherent(drvr->bus_if->dev, | |
1394 | BRCMF_TX_IOCTL_MAX_MSG_SIZE, | |
1395 | msgbuf->ioctbuf, msgbuf->ioctbuf_handle); | |
1396 | brcmf_msgbuf_release_pktids(msgbuf); | |
1397 | kfree(msgbuf->flowring_dma_handle); | |
1398 | kfree(msgbuf); | |
1399 | drvr->proto->pd = NULL; | |
1400 | } | |
1401 | } |