drivers/net/wireless/ath/ath6kl/htc.c
1 /*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include "core.h"
19 #include "hif.h"
20 #include "debug.h"
21 #include "hif-ops.h"
22 #include <asm/unaligned.h>
23
24 #define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
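/*
 * Illustrative example (not from the original source): __ALIGN_MASK() rounds
 * the length up to the device block size implied by block_mask. With a
 * hypothetical 128-byte block (block_mask = 0x7f), a 1510-byte transfer is
 * padded to (1510 + 0x7f) & ~0x7f = 1536 bytes.
 */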
25
26 /* threshold to re-enable Tx bundling for an AC */
27 #define TX_RESUME_BUNDLE_THRESHOLD 1500
28
29 /* Functions for Tx credit handling */
30 static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
31 struct htc_endpoint_credit_dist *ep_dist,
32 int credits)
33 {
34 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
35 ep_dist->endpoint, credits);
36
37 ep_dist->credits += credits;
38 ep_dist->cred_assngd += credits;
39 cred_info->cur_free_credits -= credits;
40 }
41
42 static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
43 struct list_head *ep_list,
44 int tot_credits)
45 {
46 struct htc_endpoint_credit_dist *cur_ep_dist;
47 int count;
48
49 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
50
51 cred_info->cur_free_credits = tot_credits;
52 cred_info->total_avail_credits = tot_credits;
53
54 list_for_each_entry(cur_ep_dist, ep_list, list) {
55 if (cur_ep_dist->endpoint == ENDPOINT_0)
56 continue;
57
58 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
59
60 if (tot_credits > 4) {
61 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
62 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
63 ath6kl_credit_deposit(cred_info,
64 cur_ep_dist,
65 cur_ep_dist->cred_min);
66 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
67 }
68 }
69
70 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
71 ath6kl_credit_deposit(cred_info, cur_ep_dist,
72 cur_ep_dist->cred_min);
73 /*
74 * Control service is always marked active, it
75 * never goes inactive EVER.
76 */
77 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
78 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
79 /* this is the lowest priority data endpoint */
80 /* FIXME: this looks fishy, check */
81 cred_info->lowestpri_ep_dist = cur_ep_dist->list;
82
83 /*
84 * Streams have to be created (explicit | implicit) for all
85 * kinds of traffic. BE endpoints are also inactive in the
86 * beginning. When BE traffic starts it creates implicit
87 * streams that redistribute credits.
88 *
89 * Note: all other endpoints have minimums set but are
90 * initially given NO credits. Credits will be distributed
91 * as traffic activity demands.
92 */
93 }
94
95 WARN_ON(cred_info->cur_free_credits <= 0);
96
97 list_for_each_entry(cur_ep_dist, ep_list, list) {
98 if (cur_ep_dist->endpoint == ENDPOINT_0)
99 continue;
100
101 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
102 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
103 else {
104 /*
105 * For the remaining data endpoints, we assume that
106 * each endpoint's cred_per_msg is the same. We use a simple
107 * calculation here: we take the remaining credits
108 * and determine how many max messages this can
109 * cover and then set each endpoint's normal value
110 * equal to 3/4 this amount.
111 */
112 count = (cred_info->cur_free_credits /
113 cur_ep_dist->cred_per_msg)
114 * cur_ep_dist->cred_per_msg;
115 count = (count * 3) >> 2;
116 count = max(count, cur_ep_dist->cred_per_msg);
117 cur_ep_dist->cred_norm = count;
118
119 }
120
121 ath6kl_dbg(ATH6KL_DBG_CREDIT,
122 "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
123 cur_ep_dist->endpoint,
124 cur_ep_dist->svc_id,
125 cur_ep_dist->credits,
126 cur_ep_dist->cred_per_msg,
127 cur_ep_dist->cred_norm,
128 cur_ep_dist->cred_min);
129 }
130 }
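/*
 * Worked example of the cred_norm calculation above (illustrative numbers
 * only): with 26 free credits and cred_per_msg = 6, 26 / 6 = 4 whole
 * messages -> 24 credits; 3/4 of that is 18, and max(18, 6) leaves
 * cred_norm = 18.
 */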
131
132 /* initialize and setup credit distribution */
133 int ath6kl_credit_setup(void *htc_handle,
134 struct ath6kl_htc_credit_info *cred_info)
135 {
136 u16 servicepriority[5];
137
138 memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
139
140 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
141 servicepriority[1] = WMI_DATA_VO_SVC;
142 servicepriority[2] = WMI_DATA_VI_SVC;
143 servicepriority[3] = WMI_DATA_BE_SVC;
144 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
145
146 /* set priority list */
147 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
148
149 return 0;
150 }
151
152 /* reduce an ep's credits back to a set limit */
153 static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
154 struct htc_endpoint_credit_dist *ep_dist,
155 int limit)
156 {
157 int credits;
158
159 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
160 ep_dist->endpoint, limit);
161
162 ep_dist->cred_assngd = limit;
163
164 if (ep_dist->credits <= limit)
165 return;
166
167 credits = ep_dist->credits - limit;
168 ep_dist->credits -= credits;
169 cred_info->cur_free_credits += credits;
170 }
171
172 static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
173 struct list_head *epdist_list)
174 {
175 struct htc_endpoint_credit_dist *cur_dist_list;
176
177 list_for_each_entry(cur_dist_list, epdist_list, list) {
178 if (cur_dist_list->endpoint == ENDPOINT_0)
179 continue;
180
181 if (cur_dist_list->cred_to_dist > 0) {
182 cur_dist_list->credits +=
183 cur_dist_list->cred_to_dist;
184 cur_dist_list->cred_to_dist = 0;
185 if (cur_dist_list->credits >
186 cur_dist_list->cred_assngd)
187 ath6kl_credit_reduce(cred_info,
188 cur_dist_list,
189 cur_dist_list->cred_assngd);
190
191 if (cur_dist_list->credits >
192 cur_dist_list->cred_norm)
193 ath6kl_credit_reduce(cred_info, cur_dist_list,
194 cur_dist_list->cred_norm);
195
196 if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
197 if (cur_dist_list->txq_depth == 0)
198 ath6kl_credit_reduce(cred_info,
199 cur_dist_list, 0);
200 }
201 }
202 }
203 }
204
205 /*
206 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
207 * question.
208 */
209 static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
210 struct htc_endpoint_credit_dist *ep_dist)
211 {
212 struct htc_endpoint_credit_dist *curdist_list;
213 int credits = 0;
214 int need;
215
216 if (ep_dist->svc_id == WMI_CONTROL_SVC)
217 goto out;
218
219 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
220 (ep_dist->svc_id == WMI_DATA_VO_SVC))
221 if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
222 goto out;
223
224 /*
225 * For all other services, we follow a simple algorithm of:
226 *
227 * 1. checking the free pool for credits
228 * 2. checking lower priority endpoints for credits to take
229 */
230
231 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
232
233 if (credits >= ep_dist->seek_cred)
234 goto out;
235
236 /*
237 * We don't have enough in the free pool, so try taking away from
238 * lower priority services. The rules for taking away credits:
239 *
240 * 1. Only take from lower priority endpoints
241 * 2. Only take what is allocated above the minimum (never
242 * starve an endpoint completely)
243 * 3. Only take what you need.
244 */
245
246 list_for_each_entry_reverse(curdist_list,
247 &cred_info->lowestpri_ep_dist,
248 list) {
249 if (curdist_list == ep_dist)
250 break;
251
252 need = ep_dist->seek_cred - cred_info->cur_free_credits;
253
254 if ((curdist_list->cred_assngd - need) >=
255 curdist_list->cred_min) {
256 /*
257 * The current one has been allocated more than
258 * its minimum and it has enough credits assigned
259 * above its minimum to fulfill our need, so try to
260 * take away just enough to fulfill our need.
261 */
262 ath6kl_credit_reduce(cred_info, curdist_list,
263 curdist_list->cred_assngd - need);
264
265 if (cred_info->cur_free_credits >=
266 ep_dist->seek_cred)
267 break;
268 }
269
270 if (curdist_list->endpoint == ENDPOINT_0)
271 break;
272 }
273
274 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
275
276 out:
277 /* did we find some credits? */
278 if (credits)
279 ath6kl_credit_deposit(cred_info, ep_dist, credits);
280
281 ep_dist->seek_cred = 0;
282 }
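/*
 * Example of the seek path above (hypothetical numbers): an endpoint asks
 * for seek_cred = 6 while the free pool holds only 2. The loop walks the
 * distribution list from the lowest priority upwards and reclaims credits
 * from any endpoint that can give up (cred_assngd - need) without dropping
 * below its cred_min, stopping once the pool covers the request; whatever
 * is then available, up to seek_cred, is deposited on the requester.
 */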
283
284 /* redistribute credits based on activity change */
285 static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
286 struct list_head *ep_dist_list)
287 {
288 struct htc_endpoint_credit_dist *curdist_list;
289
290 list_for_each_entry(curdist_list, ep_dist_list, list) {
291 if (curdist_list->endpoint == ENDPOINT_0)
292 continue;
293
294 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
295 (curdist_list->svc_id == WMI_DATA_BE_SVC))
296 curdist_list->dist_flags |= HTC_EP_ACTIVE;
297
298 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
299 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
300 if (curdist_list->txq_depth == 0)
301 ath6kl_credit_reduce(info, curdist_list, 0);
302 else
303 ath6kl_credit_reduce(info,
304 curdist_list,
305 curdist_list->cred_min);
306 }
307 }
308 }
309
310 /*
311 *
312 * This function is invoked whenever endpoints require credit
313 * distributions. A lock is held while this function is invoked, this
314 * function shall NOT block. The ep_dist_list is a list of distribution
315 * structures in prioritized order as defined by the call to the
316 * htc_set_credit_dist() api.
317 */
318 static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
319 struct list_head *ep_dist_list,
320 enum htc_credit_dist_reason reason)
321 {
322 switch (reason) {
323 case HTC_CREDIT_DIST_SEND_COMPLETE:
324 ath6kl_credit_update(cred_info, ep_dist_list);
325 break;
326 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
327 ath6kl_credit_redistribute(cred_info, ep_dist_list);
328 break;
329 default:
330 break;
331 }
332
333 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
334 WARN_ON(cred_info->cur_free_credits < 0);
335 }
336
337 static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
338 {
339 u8 *align_addr;
340
341 if (!IS_ALIGNED((unsigned long) *buf, 4)) {
342 align_addr = PTR_ALIGN(*buf - 4, 4);
343 memmove(align_addr, *buf, len);
344 *buf = align_addr;
345 }
346 }
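/*
 * Example (illustrative address): for a buffer starting at ...0x1006 the
 * data is memmove()'d down to ...0x1004, i.e. the previous 4-byte boundary,
 * since PTR_ALIGN(buf - 4, 4) rounds an unaligned buf down to 4 bytes.
 */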
347
348 static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
349 int ctrl0, int ctrl1)
350 {
351 struct htc_frame_hdr *hdr;
352
353 packet->buf -= HTC_HDR_LENGTH;
354 hdr = (struct htc_frame_hdr *)packet->buf;
355
356 /* Endianness? */
357 put_unaligned((u16)packet->act_len, &hdr->payld_len);
358 hdr->flags = flags;
359 hdr->eid = packet->endpoint;
360 hdr->ctrl[0] = ctrl0;
361 hdr->ctrl[1] = ctrl1;
362 }
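/*
 * For reference (layout inferred from the fields written above; see htc.h
 * for the authoritative definition): the HTC frame header prepended here
 * carries eid, flags, a 16-bit payld_len and two per-message control bytes
 * (ctrl[0]/ctrl[1]). The first four bytes of this header are what the RX
 * path later treats as the "lookahead" word.
 */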
363
364 static void htc_reclaim_txctrl_buf(struct htc_target *target,
365 struct htc_packet *pkt)
366 {
367 spin_lock_bh(&target->htc_lock);
368 list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
369 spin_unlock_bh(&target->htc_lock);
370 }
371
372 static struct htc_packet *htc_get_control_buf(struct htc_target *target,
373 bool tx)
374 {
375 struct htc_packet *packet = NULL;
376 struct list_head *buf_list;
377
378 buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
379
380 spin_lock_bh(&target->htc_lock);
381
382 if (list_empty(buf_list)) {
383 spin_unlock_bh(&target->htc_lock);
384 return NULL;
385 }
386
387 packet = list_first_entry(buf_list, struct htc_packet, list);
388 list_del(&packet->list);
389 spin_unlock_bh(&target->htc_lock);
390
391 if (tx)
392 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
393
394 return packet;
395 }
396
397 static void htc_tx_comp_update(struct htc_target *target,
398 struct htc_endpoint *endpoint,
399 struct htc_packet *packet)
400 {
401 packet->completion = NULL;
402 packet->buf += HTC_HDR_LENGTH;
403
404 if (!packet->status)
405 return;
406
407 ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
408 packet->status, packet->endpoint, packet->act_len,
409 packet->info.tx.cred_used);
410
411 /* on failure to submit, reclaim credits for this packet */
412 spin_lock_bh(&target->tx_lock);
413 endpoint->cred_dist.cred_to_dist +=
414 packet->info.tx.cred_used;
415 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
416
417 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
418 target->credit_info, &target->cred_dist_list);
419
420 ath6kl_credit_distribute(target->credit_info,
421 &target->cred_dist_list,
422 HTC_CREDIT_DIST_SEND_COMPLETE);
423
424 spin_unlock_bh(&target->tx_lock);
425 }
426
427 static void htc_tx_complete(struct htc_endpoint *endpoint,
428 struct list_head *txq)
429 {
430 if (list_empty(txq))
431 return;
432
433 ath6kl_dbg(ATH6KL_DBG_HTC,
434 "htc tx complete ep %d pkts %d\n",
435 endpoint->eid, get_queue_depth(txq));
436
437 ath6kl_tx_complete(endpoint->target->dev->ar, txq);
438 }
439
440 static void htc_tx_comp_handler(struct htc_target *target,
441 struct htc_packet *packet)
442 {
443 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
444 struct list_head container;
445
446 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
447 packet->info.tx.seqno);
448
449 htc_tx_comp_update(target, endpoint, packet);
450 INIT_LIST_HEAD(&container);
451 list_add_tail(&packet->list, &container);
452 /* do completion */
453 htc_tx_complete(endpoint, &container);
454 }
455
456 static void htc_async_tx_scat_complete(struct htc_target *target,
457 struct hif_scatter_req *scat_req)
458 {
459 struct htc_endpoint *endpoint;
460 struct htc_packet *packet;
461 struct list_head tx_compq;
462 int i;
463
464 INIT_LIST_HEAD(&tx_compq);
465
466 ath6kl_dbg(ATH6KL_DBG_HTC,
467 "htc tx scat complete len %d entries %d\n",
468 scat_req->len, scat_req->scat_entries);
469
470 if (scat_req->status)
471 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
472
473 packet = scat_req->scat_list[0].packet;
474 endpoint = &target->endpoint[packet->endpoint];
475
476 /* walk through the scatter list and process */
477 for (i = 0; i < scat_req->scat_entries; i++) {
478 packet = scat_req->scat_list[i].packet;
479 if (!packet) {
480 WARN_ON(1);
481 return;
482 }
483
484 packet->status = scat_req->status;
485 htc_tx_comp_update(target, endpoint, packet);
486 list_add_tail(&packet->list, &tx_compq);
487 }
488
489 /* free scatter request */
490 hif_scatter_req_add(target->dev->ar, scat_req);
491
492 /* complete all packets */
493 htc_tx_complete(endpoint, &tx_compq);
494 }
495
496 static int ath6kl_htc_tx_issue(struct htc_target *target,
497 struct htc_packet *packet)
498 {
499 int status;
500 bool sync = false;
501 u32 padded_len, send_len;
502
503 if (!packet->completion)
504 sync = true;
505
506 send_len = packet->act_len + HTC_HDR_LENGTH;
507
508 padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
509
510 ath6kl_dbg(ATH6KL_DBG_HTC,
511 "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
512 send_len, packet->info.tx.seqno, padded_len,
513 target->dev->ar->mbox_info.htc_addr,
514 sync ? "sync" : "async");
515
516 if (sync) {
517 status = hif_read_write_sync(target->dev->ar,
518 target->dev->ar->mbox_info.htc_addr,
519 packet->buf, padded_len,
520 HIF_WR_SYNC_BLOCK_INC);
521
522 packet->status = status;
523 packet->buf += HTC_HDR_LENGTH;
524 } else
525 status = hif_write_async(target->dev->ar,
526 target->dev->ar->mbox_info.htc_addr,
527 packet->buf, padded_len,
528 HIF_WR_ASYNC_BLOCK_INC, packet);
529
530 return status;
531 }
532
533 static int htc_check_credits(struct htc_target *target,
534 struct htc_endpoint *ep, u8 *flags,
535 enum htc_endpoint_id eid, unsigned int len,
536 int *req_cred)
537 {
538
539 *req_cred = (len > target->tgt_cred_sz) ?
540 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
541
542 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
543 *req_cred, ep->cred_dist.credits);
544
545 if (ep->cred_dist.credits < *req_cred) {
546 if (eid == ENDPOINT_0)
547 return -EINVAL;
548
549 /* Seek more credits */
550 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
551
552 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
553
554 ep->cred_dist.seek_cred = 0;
555
556 if (ep->cred_dist.credits < *req_cred) {
557 ath6kl_dbg(ATH6KL_DBG_CREDIT,
558 "credit not found for ep %d\n",
559 eid);
560 return -EINVAL;
561 }
562 }
563
564 ep->cred_dist.credits -= *req_cred;
565 ep->ep_st.cred_cosumd += *req_cred;
566
567 /* When we are getting low on credits, ask for more */
568 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
569 ep->cred_dist.seek_cred =
570 ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
571
572 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
573
574 /* see if we were successful in getting more */
575 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
576 /* tell the target we need credits ASAP! */
577 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
578 ep->ep_st.cred_low_indicate += 1;
579 ath6kl_dbg(ATH6KL_DBG_CREDIT,
580 "credit we need credits asap\n");
581 }
582 }
583
584 return 0;
585 }
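/*
 * Example of the credit check above (hypothetical sizes): with a target
 * credit size of 256 bytes, a 600-byte send needs DIV_ROUND_UP(600, 256)
 * = 3 credits, while anything that fits within one credit needs exactly 1.
 */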
586
587 static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
588 struct htc_endpoint *endpoint,
589 struct list_head *queue)
590 {
591 int req_cred;
592 u8 flags;
593 struct htc_packet *packet;
594 unsigned int len;
595
596 while (true) {
597
598 flags = 0;
599
600 if (list_empty(&endpoint->txq))
601 break;
602 packet = list_first_entry(&endpoint->txq, struct htc_packet,
603 list);
604
605 ath6kl_dbg(ATH6KL_DBG_HTC,
606 "htc tx got packet 0x%p queue depth %d\n",
607 packet, get_queue_depth(&endpoint->txq));
608
609 len = CALC_TXRX_PADDED_LEN(target,
610 packet->act_len + HTC_HDR_LENGTH);
611
612 if (htc_check_credits(target, endpoint, &flags,
613 packet->endpoint, len, &req_cred))
614 break;
615
616 /* now we can fully move onto caller's queue */
617 packet = list_first_entry(&endpoint->txq, struct htc_packet,
618 list);
619 list_move_tail(&packet->list, queue);
620
621 /* save the number of credits this packet consumed */
622 packet->info.tx.cred_used = req_cred;
623
624 /* all TX packets are handled asynchronously */
625 packet->completion = htc_tx_comp_handler;
626 packet->context = target;
627 endpoint->ep_st.tx_issued += 1;
628
629 /* save send flags */
630 packet->info.tx.flags = flags;
631 packet->info.tx.seqno = endpoint->seqno;
632 endpoint->seqno++;
633 }
634 }
635
636 /* See if the padded tx length falls on a credit boundary */
637 static int htc_get_credit_padding(unsigned int cred_sz, int *len,
638 struct htc_endpoint *ep)
639 {
640 int rem_cred, cred_pad;
641
642 rem_cred = *len % cred_sz;
643
644 /* No padding needed */
645 if (!rem_cred)
646 return 0;
647
648 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
649 return -1;
650
651 /*
652 * The transfer consumes a "partial" credit, this
653 * packet cannot be bundled unless we add
654 * additional "dummy" padding (max 255 bytes) to
655 * consume the entire credit.
656 */
657 cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
658
659 if ((cred_pad > 0) && (cred_pad <= 255))
660 *len += cred_pad;
661 else
662 /* The amount of padding is too large, send as non-bundled */
663 return -1;
664
665 return cred_pad;
666 }
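/*
 * Example of the bundling pad above (hypothetical sizes): with cred_sz =
 * 128 and a padded length of 96, rem_cred = 96 and cred_pad = 128 - 96 =
 * 32, so the length grows to 128 and the message consumes a whole credit.
 */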
667
668 static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
669 struct htc_endpoint *endpoint,
670 struct hif_scatter_req *scat_req,
671 int n_scat,
672 struct list_head *queue)
673 {
674 struct htc_packet *packet;
675 int i, len, rem_scat, cred_pad;
676 int status = 0;
677
678 rem_scat = target->max_tx_bndl_sz;
679
680 for (i = 0; i < n_scat; i++) {
681 scat_req->scat_list[i].packet = NULL;
682
683 if (list_empty(queue))
684 break;
685
686 packet = list_first_entry(queue, struct htc_packet, list);
687 len = CALC_TXRX_PADDED_LEN(target,
688 packet->act_len + HTC_HDR_LENGTH);
689
690 cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
691 &len, endpoint);
692 if (cred_pad < 0 || rem_scat < len) {
693 status = -ENOSPC;
694 break;
695 }
696
697 rem_scat -= len;
698 /* now remove it from the queue */
699 list_del(&packet->list);
700
701 scat_req->scat_list[i].packet = packet;
702 /* prepare packet and flag message as part of a send bundle */
703 ath6kl_htc_tx_prep_pkt(packet,
704 packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
705 cred_pad, packet->info.tx.seqno);
706 /* Make sure the buffer is 4-byte aligned */
707 ath6kl_htc_tx_buf_align(&packet->buf,
708 packet->act_len + HTC_HDR_LENGTH);
709 scat_req->scat_list[i].buf = packet->buf;
710 scat_req->scat_list[i].len = len;
711
712 scat_req->len += len;
713 scat_req->scat_entries++;
714 ath6kl_dbg(ATH6KL_DBG_HTC,
715 "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
716 i, packet, packet->info.tx.seqno, len, rem_scat);
717 }
718
719 /* Roll back scatter setup in case of any failure */
720 if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
721 for (i = scat_req->scat_entries - 1; i >= 0; i--) {
722 packet = scat_req->scat_list[i].packet;
723 if (packet) {
724 packet->buf += HTC_HDR_LENGTH;
725 list_add(&packet->list, queue);
726 }
727 }
728 return -EAGAIN;
729 }
730
731 return status;
732 }
733
734 /*
735 * Drain a queue and send as bundles. This function may return without fully
736 * draining the queue when:
737 *
738 * 1. scatter resources are exhausted
739 * 2. a message that will consume a partial credit will stop the
740 * bundling process early
741 * 3. we drop below the minimum number of messages for a bundle
742 */
743 static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
744 struct list_head *queue,
745 int *sent_bundle, int *n_bundle_pkts)
746 {
747 struct htc_target *target = endpoint->target;
748 struct hif_scatter_req *scat_req = NULL;
749 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
750 int status;
751 u32 txb_mask;
752 u8 ac = WMM_NUM_AC;
753
754 if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
755 (WMI_CONTROL_SVC != endpoint->svc_id))
756 ac = target->dev->ar->ep2ac_map[endpoint->eid];
757
758 while (true) {
759 status = 0;
760 n_scat = get_queue_depth(queue);
761 n_scat = min(n_scat, target->msg_per_bndl_max);
762
763 if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
764 /* not enough to bundle */
765 break;
766
767 scat_req = hif_scatter_req_get(target->dev->ar);
768
769 if (!scat_req) {
770 /* no scatter resources */
771 ath6kl_dbg(ATH6KL_DBG_HTC,
772 "htc tx no more scatter resources\n");
773 break;
774 }
775
776 if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
777 if (WMM_AC_BE == ac)
778 /*
779 * BE, BK have priorities and bit
780 * positions reversed
781 */
782 txb_mask = (1 << WMM_AC_BK);
783 else
784 /*
785 * any AC with priority lower than
786 * itself
787 */
788 txb_mask = ((1 << ac) - 1);
789 /*
790 * when the scatter request resources drop below a
791 * certain threshold, disable Tx bundling for all
792 * AC's with priority lower than the current requesting
793 * AC. Otherwise re-enable Tx bundling for them
794 */
795 if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
796 target->tx_bndl_mask &= ~txb_mask;
797 else
798 target->tx_bndl_mask |= txb_mask;
799 }
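/*
 * Example (assuming the usual ath6kl AC numbering BE=0, BK=1, VI=2, VO=3):
 * a request from the VI queue builds txb_mask = (1 << 2) - 1 = 0x3, i.e.
 * the BE and BK bits, while a request from BE masks only (1 << WMM_AC_BK)
 * because BE and BK have their priorities and bit positions reversed.
 */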
800
801 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
802 n_scat);
803
804 scat_req->len = 0;
805 scat_req->scat_entries = 0;
806
807 status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
808 scat_req, n_scat,
809 queue);
810 if (status == -EAGAIN) {
811 hif_scatter_req_add(target->dev->ar, scat_req);
812 break;
813 }
814
815 /* send path is always asynchronous */
816 scat_req->complete = htc_async_tx_scat_complete;
817 n_sent_bundle++;
818 tot_pkts_bundle += scat_req->scat_entries;
819
820 ath6kl_dbg(ATH6KL_DBG_HTC,
821 "htc tx scatter bytes %d entries %d\n",
822 scat_req->len, scat_req->scat_entries);
823 ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
824
825 if (status)
826 break;
827 }
828
829 *sent_bundle = n_sent_bundle;
830 *n_bundle_pkts = tot_pkts_bundle;
831 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
832 n_sent_bundle);
833
834 return;
835 }
836
837 static void ath6kl_htc_tx_from_queue(struct htc_target *target,
838 struct htc_endpoint *endpoint)
839 {
840 struct list_head txq;
841 struct htc_packet *packet;
842 int bundle_sent;
843 int n_pkts_bundle;
844 u8 ac = WMM_NUM_AC;
845
846 spin_lock_bh(&target->tx_lock);
847
848 endpoint->tx_proc_cnt++;
849 if (endpoint->tx_proc_cnt > 1) {
850 endpoint->tx_proc_cnt--;
851 spin_unlock_bh(&target->tx_lock);
852 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
853 return;
854 }
855
856 /*
857 * drain the endpoint TX queue for transmission as long
858 * as we have enough credits.
859 */
860 INIT_LIST_HEAD(&txq);
861
862 if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
863 (WMI_CONTROL_SVC != endpoint->svc_id))
864 ac = target->dev->ar->ep2ac_map[endpoint->eid];
865
866 while (true) {
867
868 if (list_empty(&endpoint->txq))
869 break;
870
871 ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
872
873 if (list_empty(&txq))
874 break;
875
876 spin_unlock_bh(&target->tx_lock);
877
878 bundle_sent = 0;
879 n_pkts_bundle = 0;
880
881 while (true) {
882 /* try to send a bundle on each pass */
883 if ((target->tx_bndl_mask) &&
884 (get_queue_depth(&txq) >=
885 HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
886 int temp1 = 0, temp2 = 0;
887
888 /* check if bundling is enabled for an AC */
889 if (target->tx_bndl_mask & (1 << ac)) {
890 ath6kl_htc_tx_bundle(endpoint, &txq,
891 &temp1, &temp2);
892 bundle_sent += temp1;
893 n_pkts_bundle += temp2;
894 }
895 }
896
897 if (list_empty(&txq))
898 break;
899
900 packet = list_first_entry(&txq, struct htc_packet,
901 list);
902 list_del(&packet->list);
903
904 ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
905 0, packet->info.tx.seqno);
906 ath6kl_htc_tx_issue(target, packet);
907 }
908
909 spin_lock_bh(&target->tx_lock);
910
911 endpoint->ep_st.tx_bundles += bundle_sent;
912 endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
913
914 /*
915 * if an AC has bundling disabled and no tx bundling
916 * has occurred continuously for a certain number of TX packets,
917 * re-enable tx bundling for this AC
918 */
919 if (!bundle_sent) {
920 if (!(target->tx_bndl_mask & (1 << ac)) &&
921 (ac < WMM_NUM_AC)) {
922 if (++target->ac_tx_count[ac] >=
923 TX_RESUME_BUNDLE_THRESHOLD) {
924 target->ac_tx_count[ac] = 0;
925 target->tx_bndl_mask |= (1 << ac);
926 }
927 }
928 } else {
929 /* tx bundling will reset the counter */
930 if (ac < WMM_NUM_AC)
931 target->ac_tx_count[ac] = 0;
932 }
933 }
934
935 endpoint->tx_proc_cnt = 0;
936 spin_unlock_bh(&target->tx_lock);
937 }
938
939 static bool ath6kl_htc_tx_try(struct htc_target *target,
940 struct htc_endpoint *endpoint,
941 struct htc_packet *tx_pkt)
942 {
943 struct htc_ep_callbacks ep_cb;
944 int txq_depth;
945 bool overflow = false;
946
947 ep_cb = endpoint->ep_cb;
948
949 spin_lock_bh(&target->tx_lock);
950 txq_depth = get_queue_depth(&endpoint->txq);
951 spin_unlock_bh(&target->tx_lock);
952
953 if (txq_depth >= endpoint->max_txq_depth)
954 overflow = true;
955
956 if (overflow)
957 ath6kl_dbg(ATH6KL_DBG_HTC,
958 "htc tx overflow ep %d depth %d max %d\n",
959 endpoint->eid, txq_depth,
960 endpoint->max_txq_depth);
961
962 if (overflow && ep_cb.tx_full) {
963 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
964 HTC_SEND_FULL_DROP) {
965 endpoint->ep_st.tx_dropped += 1;
966 return false;
967 }
968 }
969
970 spin_lock_bh(&target->tx_lock);
971 list_add_tail(&tx_pkt->list, &endpoint->txq);
972 spin_unlock_bh(&target->tx_lock);
973
974 ath6kl_htc_tx_from_queue(target, endpoint);
975
976 return true;
977 }
978
979 static void htc_chk_ep_txq(struct htc_target *target)
980 {
981 struct htc_endpoint *endpoint;
982 struct htc_endpoint_credit_dist *cred_dist;
983
984 /*
985 * Run through the credit distribution list to see if there are
986 * packets queued. NOTE: no locks need to be taken since the
987 * distribution list is not dynamic (cannot be re-ordered) and we
988 * are not modifying any state.
989 */
990 list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
991 endpoint = cred_dist->htc_ep;
992
993 spin_lock_bh(&target->tx_lock);
994 if (!list_empty(&endpoint->txq)) {
995 ath6kl_dbg(ATH6KL_DBG_HTC,
996 "htc creds ep %d credits %d pkts %d\n",
997 cred_dist->endpoint,
998 endpoint->cred_dist.credits,
999 get_queue_depth(&endpoint->txq));
1000 spin_unlock_bh(&target->tx_lock);
1001 /*
1002 * Try to start the stalled queue, this list is
1003 * ordered by priority. If there are credits
1004 * available the highest priority queue will get a
1005 * chance to reclaim credits from lower priority
1006 * ones.
1007 */
1008 ath6kl_htc_tx_from_queue(target, endpoint);
1009 spin_lock_bh(&target->tx_lock);
1010 }
1011 spin_unlock_bh(&target->tx_lock);
1012 }
1013 }
1014
1015 static int htc_setup_tx_complete(struct htc_target *target)
1016 {
1017 struct htc_packet *send_pkt = NULL;
1018 int status;
1019
1020 send_pkt = htc_get_control_buf(target, true);
1021
1022 if (!send_pkt)
1023 return -ENOMEM;
1024
1025 if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
1026 struct htc_setup_comp_ext_msg *setup_comp_ext;
1027 u32 flags = 0;
1028
1029 setup_comp_ext =
1030 (struct htc_setup_comp_ext_msg *)send_pkt->buf;
1031 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
1032 setup_comp_ext->msg_id =
1033 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1034
1035 if (target->msg_per_bndl_max > 0) {
1036 /* Indicate HTC bundling to the target */
1037 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
1038 setup_comp_ext->msg_per_rxbndl =
1039 target->msg_per_bndl_max;
1040 }
1041
1042 memcpy(&setup_comp_ext->flags, &flags,
1043 sizeof(setup_comp_ext->flags));
1044 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
1045 sizeof(struct htc_setup_comp_ext_msg),
1046 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1047
1048 } else {
1049 struct htc_setup_comp_msg *setup_comp;
1050 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
1051 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
1052 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
1053 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
1054 sizeof(struct htc_setup_comp_msg),
1055 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1056 }
1057
1058 /* we want synchronous operation */
1059 send_pkt->completion = NULL;
1060 ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
1061 status = ath6kl_htc_tx_issue(target, send_pkt);
1062
1063 if (send_pkt != NULL)
1064 htc_reclaim_txctrl_buf(target, send_pkt);
1065
1066 return status;
1067 }
1068
1069 void ath6kl_htc_set_credit_dist(struct htc_target *target,
1070 struct ath6kl_htc_credit_info *credit_info,
1071 u16 srvc_pri_order[], int list_len)
1072 {
1073 struct htc_endpoint *endpoint;
1074 int i, ep;
1075
1076 target->credit_info = credit_info;
1077
1078 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
1079 &target->cred_dist_list);
1080
1081 for (i = 0; i < list_len; i++) {
1082 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
1083 endpoint = &target->endpoint[ep];
1084 if (endpoint->svc_id == srvc_pri_order[i]) {
1085 list_add_tail(&endpoint->cred_dist.list,
1086 &target->cred_dist_list);
1087 break;
1088 }
1089 }
1090 if (ep >= ENDPOINT_MAX) {
1091 WARN_ON(1);
1092 return;
1093 }
1094 }
1095 }
1096
1097 int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
1098 {
1099 struct htc_endpoint *endpoint;
1100 struct list_head queue;
1101
1102 ath6kl_dbg(ATH6KL_DBG_HTC,
1103 "htc tx ep id %d buf 0x%p len %d\n",
1104 packet->endpoint, packet->buf, packet->act_len);
1105
1106 if (packet->endpoint >= ENDPOINT_MAX) {
1107 WARN_ON(1);
1108 return -EINVAL;
1109 }
1110
1111 endpoint = &target->endpoint[packet->endpoint];
1112
1113 if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
1114 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
1115 -ECANCELED : -ENOSPC;
1116 INIT_LIST_HEAD(&queue);
1117 list_add(&packet->list, &queue);
1118 htc_tx_complete(endpoint, &queue);
1119 }
1120
1121 return 0;
1122 }
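/*
 * Typical caller flow (a sketch, not lifted from this file): the send paths
 * fill in an htc_packet with set_htc_pkt_info(packet, cookie, buf, len,
 * eid, tag) and then hand it off with ath6kl_htc_tx(target, packet). If
 * the endpoint queue overflows or the target is stopping, the packet is
 * completed right here with -ENOSPC or -ECANCELED via the endpoint's tx
 * completion callback.
 */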
1123
1124 /* flush endpoint TX queue */
1125 void ath6kl_htc_flush_txep(struct htc_target *target,
1126 enum htc_endpoint_id eid, u16 tag)
1127 {
1128 struct htc_packet *packet, *tmp_pkt;
1129 struct list_head discard_q, container;
1130 struct htc_endpoint *endpoint = &target->endpoint[eid];
1131
1132 if (!endpoint->svc_id) {
1133 WARN_ON(1);
1134 return;
1135 }
1136
1137 /* initialize the discard queue */
1138 INIT_LIST_HEAD(&discard_q);
1139
1140 spin_lock_bh(&target->tx_lock);
1141
1142 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
1143 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
1144 (tag == packet->info.tx.tag))
1145 list_move_tail(&packet->list, &discard_q);
1146 }
1147
1148 spin_unlock_bh(&target->tx_lock);
1149
1150 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
1151 packet->status = -ECANCELED;
1152 list_del(&packet->list);
1153 ath6kl_dbg(ATH6KL_DBG_HTC,
1154 "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
1155 packet, packet->act_len,
1156 packet->endpoint, packet->info.tx.tag);
1157
1158 INIT_LIST_HEAD(&container);
1159 list_add_tail(&packet->list, &container);
1160 htc_tx_complete(endpoint, &container);
1161 }
1162
1163 }
1164
1165 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
1166 {
1167 struct htc_endpoint *endpoint;
1168 int i;
1169
1170 dump_cred_dist_stats(target);
1171
1172 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1173 endpoint = &target->endpoint[i];
1174 if (endpoint->svc_id == 0)
1175 /* not in use.. */
1176 continue;
1177 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
1178 }
1179 }
1180
1181 void ath6kl_htc_indicate_activity_change(struct htc_target *target,
1182 enum htc_endpoint_id eid, bool active)
1183 {
1184 struct htc_endpoint *endpoint = &target->endpoint[eid];
1185 bool dist = false;
1186
1187 if (endpoint->svc_id == 0) {
1188 WARN_ON(1);
1189 return;
1190 }
1191
1192 spin_lock_bh(&target->tx_lock);
1193
1194 if (active) {
1195 if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
1196 endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
1197 dist = true;
1198 }
1199 } else {
1200 if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
1201 endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
1202 dist = true;
1203 }
1204 }
1205
1206 if (dist) {
1207 endpoint->cred_dist.txq_depth =
1208 get_queue_depth(&endpoint->txq);
1209
1210 ath6kl_dbg(ATH6KL_DBG_HTC,
1211 "htc tx activity ctxt 0x%p dist 0x%p\n",
1212 target->credit_info, &target->cred_dist_list);
1213
1214 ath6kl_credit_distribute(target->credit_info,
1215 &target->cred_dist_list,
1216 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
1217 }
1218
1219 spin_unlock_bh(&target->tx_lock);
1220
1221 if (dist && !active)
1222 htc_chk_ep_txq(target);
1223 }
1224
1225 /* HTC Rx */
1226
1227 static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
1228 int n_look_ahds)
1229 {
1230 endpoint->ep_st.rx_pkts++;
1231 if (n_look_ahds == 1)
1232 endpoint->ep_st.rx_lkahds++;
1233 else if (n_look_ahds > 1)
1234 endpoint->ep_st.rx_bundle_lkahd++;
1235 }
1236
1237 static inline bool htc_valid_rx_frame_len(struct htc_target *target,
1238 enum htc_endpoint_id eid, int len)
1239 {
1240 return (eid == target->dev->ar->ctrl_ep) ?
1241 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
1242 }
1243
1244 static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
1245 {
1246 struct list_head queue;
1247
1248 INIT_LIST_HEAD(&queue);
1249 list_add_tail(&packet->list, &queue);
1250 return ath6kl_htc_add_rxbuf_multiple(target, &queue);
1251 }
1252
1253 static void htc_reclaim_rxbuf(struct htc_target *target,
1254 struct htc_packet *packet,
1255 struct htc_endpoint *ep)
1256 {
1257 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
1258 htc_rxpkt_reset(packet);
1259 packet->status = -ECANCELED;
1260 ep->ep_cb.rx(ep->target, packet);
1261 } else {
1262 htc_rxpkt_reset(packet);
1263 htc_add_rxbuf((void *)(target), packet);
1264 }
1265 }
1266
1267 static void reclaim_rx_ctrl_buf(struct htc_target *target,
1268 struct htc_packet *packet)
1269 {
1270 spin_lock_bh(&target->htc_lock);
1271 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
1272 spin_unlock_bh(&target->htc_lock);
1273 }
1274
1275 static int ath6kl_htc_rx_packet(struct htc_target *target,
1276 struct htc_packet *packet,
1277 u32 rx_len)
1278 {
1279 struct ath6kl_device *dev = target->dev;
1280 u32 padded_len;
1281 int status;
1282
1283 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
1284
1285 if (padded_len > packet->buf_len) {
1286 ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
1287 padded_len, rx_len, packet->buf_len);
1288 return -ENOMEM;
1289 }
1290
1291 ath6kl_dbg(ATH6KL_DBG_HTC,
1292 "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
1293 packet, packet->info.rx.exp_hdr,
1294 padded_len, dev->ar->mbox_info.htc_addr);
1295
1296 status = hif_read_write_sync(dev->ar,
1297 dev->ar->mbox_info.htc_addr,
1298 packet->buf, padded_len,
1299 HIF_RD_SYNC_BLOCK_FIX);
1300
1301 packet->status = status;
1302
1303 return status;
1304 }
1305
1306 /*
1307 * optimization for recv packets, we can indicate a
1308 * "hint" that there are more single-packets to fetch
1309 * on this endpoint.
1310 */
1311 static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
1312 struct htc_endpoint *endpoint,
1313 struct htc_packet *packet)
1314 {
1315 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
1316
1317 if (htc_hdr->eid == packet->endpoint) {
1318 if (!list_empty(&endpoint->rx_bufq))
1319 packet->info.rx.indicat_flags |=
1320 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1321 }
1322 }
1323
1324 static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
1325 {
1326 struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
1327
1328 if (ep_cb.rx_refill_thresh > 0) {
1329 spin_lock_bh(&endpoint->target->rx_lock);
1330 if (get_queue_depth(&endpoint->rx_bufq)
1331 < ep_cb.rx_refill_thresh) {
1332 spin_unlock_bh(&endpoint->target->rx_lock);
1333 ep_cb.rx_refill(endpoint->target, endpoint->eid);
1334 return;
1335 }
1336 spin_unlock_bh(&endpoint->target->rx_lock);
1337 }
1338 }
1339
1340 /* This function is called with rx_lock held */
1341 static int ath6kl_htc_rx_setup(struct htc_target *target,
1342 struct htc_endpoint *ep,
1343 u32 *lk_ahds, struct list_head *queue, int n_msg)
1344 {
1345 struct htc_packet *packet;
1346 /* FIXME: type of lk_ahds can't be right */
1347 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
1348 struct htc_ep_callbacks ep_cb;
1349 int status = 0, j, full_len;
1350 bool no_recycle;
1351
1352 full_len = CALC_TXRX_PADDED_LEN(target,
1353 le16_to_cpu(htc_hdr->payld_len) +
1354 sizeof(*htc_hdr));
1355
1356 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
1357 ath6kl_warn("Rx buffer requested with invalid length\n");
1358 return -EINVAL;
1359 }
1360
1361 ep_cb = ep->ep_cb;
1362 for (j = 0; j < n_msg; j++) {
1363
1364 /*
1365 * Reset flag; any packets allocated using the
1366 * rx_alloc() API cannot be recycled on
1367 * cleanup, they must be explicitly returned.
1368 */
1369 no_recycle = false;
1370
1371 if (ep_cb.rx_allocthresh &&
1372 (full_len > ep_cb.rx_alloc_thresh)) {
1373 ep->ep_st.rx_alloc_thresh_hit += 1;
1374 ep->ep_st.rxalloc_thresh_byte +=
1375 le16_to_cpu(htc_hdr->payld_len);
1376
1377 spin_unlock_bh(&target->rx_lock);
1378 no_recycle = true;
1379
1380 packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
1381 full_len);
1382 spin_lock_bh(&target->rx_lock);
1383 } else {
1384 /* refill handler is being used */
1385 if (list_empty(&ep->rx_bufq)) {
1386 if (ep_cb.rx_refill) {
1387 spin_unlock_bh(&target->rx_lock);
1388 ep_cb.rx_refill(ep->target, ep->eid);
1389 spin_lock_bh(&target->rx_lock);
1390 }
1391 }
1392
1393 if (list_empty(&ep->rx_bufq))
1394 packet = NULL;
1395 else {
1396 packet = list_first_entry(&ep->rx_bufq,
1397 struct htc_packet, list);
1398 list_del(&packet->list);
1399 }
1400 }
1401
1402 if (!packet) {
1403 target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
1404 target->ep_waiting = ep->eid;
1405 return -ENOSPC;
1406 }
1407
1408 /* clear flags */
1409 packet->info.rx.rx_flags = 0;
1410 packet->info.rx.indicat_flags = 0;
1411 packet->status = 0;
1412
1413 if (no_recycle)
1414 /*
1415 * flag that these packets cannot be
1416 * recycled, they have to be returned to
1417 * the user
1418 */
1419 packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
1420
1421 /* Caller needs to free this upon any failure */
1422 list_add_tail(&packet->list, queue);
1423
1424 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1425 status = -ECANCELED;
1426 break;
1427 }
1428
1429 if (j) {
1430 packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
1431 packet->info.rx.exp_hdr = 0xFFFFFFFF;
1432 } else
1433 /* set expected look ahead */
1434 packet->info.rx.exp_hdr = *lk_ahds;
1435
1436 packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
1437 HTC_HDR_LENGTH;
1438 }
1439
1440 return status;
1441 }
1442
1443 static int ath6kl_htc_rx_alloc(struct htc_target *target,
1444 u32 lk_ahds[], int msg,
1445 struct htc_endpoint *endpoint,
1446 struct list_head *queue)
1447 {
1448 int status = 0;
1449 struct htc_packet *packet, *tmp_pkt;
1450 struct htc_frame_hdr *htc_hdr;
1451 int i, n_msg;
1452
1453 spin_lock_bh(&target->rx_lock);
1454
1455 for (i = 0; i < msg; i++) {
1456
1457 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1458
1459 if (htc_hdr->eid >= ENDPOINT_MAX) {
1460 ath6kl_err("invalid ep in look-ahead: %d\n",
1461 htc_hdr->eid);
1462 status = -ENOMEM;
1463 break;
1464 }
1465
1466 if (htc_hdr->eid != endpoint->eid) {
1467 ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
1468 htc_hdr->eid, endpoint->eid, i);
1469 status = -ENOMEM;
1470 break;
1471 }
1472
1473 if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
1474 ath6kl_err("payload len %d exceeds max htc : %d !\n",
1475 htc_hdr->payld_len,
1476 (u32) HTC_MAX_PAYLOAD_LENGTH);
1477 status = -ENOMEM;
1478 break;
1479 }
1480
1481 if (endpoint->svc_id == 0) {
1482 ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
1483 status = -ENOMEM;
1484 break;
1485 }
1486
1487 if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
1488 /*
1489 * HTC header indicates that every packet to follow
1490 * has the same padded length so that it can be
1491 * optimally fetched as a full bundle.
1492 */
1493 n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
1494 HTC_FLG_RX_BNDL_CNT_S;
1495
1496 /* the count doesn't include the starter frame */
1497 n_msg++;
1498 if (n_msg > target->msg_per_bndl_max) {
1499 status = -ENOMEM;
1500 break;
1501 }
1502
1503 endpoint->ep_st.rx_bundle_from_hdr += 1;
1504 ath6kl_dbg(ATH6KL_DBG_HTC,
1505 "htc rx bundle pkts %d\n",
1506 n_msg);
1507 } else
1508 /* HTC header only indicates 1 message to fetch */
1509 n_msg = 1;
1510
1511 /* Setup packet buffers for each message */
1512 status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
1513 queue, n_msg);
1514
1515 /*
1516 * This is due to unavailability of buffers to receive the entire data.
1517 * Return no error so that free buffers from queue can be used
1518 * to receive partial data.
1519 */
1520 if (status == -ENOSPC) {
1521 spin_unlock_bh(&target->rx_lock);
1522 return 0;
1523 }
1524
1525 if (status)
1526 break;
1527 }
1528
1529 spin_unlock_bh(&target->rx_lock);
1530
1531 if (status) {
1532 list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
1533 list_del(&packet->list);
1534 htc_reclaim_rxbuf(target, packet,
1535 &target->endpoint[packet->endpoint]);
1536 }
1537 }
1538
1539 return status;
1540 }
1541
1542 static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1543 {
1544 if (packets->endpoint != ENDPOINT_0) {
1545 WARN_ON(1);
1546 return;
1547 }
1548
1549 if (packets->status == -ECANCELED) {
1550 reclaim_rx_ctrl_buf(context, packets);
1551 return;
1552 }
1553
1554 if (packets->act_len > 0) {
1555 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1556 packets->act_len + HTC_HDR_LENGTH);
1557
1558 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1559 "htc rx unexpected endpoint 0 message", "",
1560 packets->buf - HTC_HDR_LENGTH,
1561 packets->act_len + HTC_HDR_LENGTH);
1562 }
1563
1564 htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1565 }
1566
1567 static void htc_proc_cred_rpt(struct htc_target *target,
1568 struct htc_credit_report *rpt,
1569 int n_entries,
1570 enum htc_endpoint_id from_ep)
1571 {
1572 struct htc_endpoint *endpoint;
1573 int tot_credits = 0, i;
1574 bool dist = false;
1575
1576 spin_lock_bh(&target->tx_lock);
1577
1578 for (i = 0; i < n_entries; i++, rpt++) {
1579 if (rpt->eid >= ENDPOINT_MAX) {
1580 WARN_ON(1);
1581 spin_unlock_bh(&target->tx_lock);
1582 return;
1583 }
1584
1585 endpoint = &target->endpoint[rpt->eid];
1586
1587 ath6kl_dbg(ATH6KL_DBG_CREDIT,
1588 "credit report ep %d credits %d\n",
1589 rpt->eid, rpt->credits);
1590
1591 endpoint->ep_st.tx_cred_rpt += 1;
1592 endpoint->ep_st.cred_retnd += rpt->credits;
1593
1594 if (from_ep == rpt->eid) {
1595 /*
1596 * This credit report arrived on the same endpoint
1597 * indicating it arrived in an RX packet.
1598 */
1599 endpoint->ep_st.cred_from_rx += rpt->credits;
1600 endpoint->ep_st.cred_rpt_from_rx += 1;
1601 } else if (from_ep == ENDPOINT_0) {
1602 /* credit arrived on endpoint 0 as a NULL message */
1603 endpoint->ep_st.cred_from_ep0 += rpt->credits;
1604 endpoint->ep_st.cred_rpt_ep0 += 1;
1605 } else {
1606 endpoint->ep_st.cred_from_other += rpt->credits;
1607 endpoint->ep_st.cred_rpt_from_other += 1;
1608 }
1609
1610 if (rpt->eid == ENDPOINT_0)
1611 /* always give endpoint 0 credits back */
1612 endpoint->cred_dist.credits += rpt->credits;
1613 else {
1614 endpoint->cred_dist.cred_to_dist += rpt->credits;
1615 dist = true;
1616 }
1617
1618 /*
1619 * Refresh tx depth for distribution function that will
1620 * recover these credits. NOTE: this is only valid when
1621 * there are credits to recover!
1622 */
1623 endpoint->cred_dist.txq_depth =
1624 get_queue_depth(&endpoint->txq);
1625
1626 tot_credits += rpt->credits;
1627 }
1628
1629 if (dist) {
1630 /*
1631 * This was a credit return based on a completed send
1632 * operation. Note: this is done with the lock held.
1633 */
1634 ath6kl_credit_distribute(target->credit_info,
1635 &target->cred_dist_list,
1636 HTC_CREDIT_DIST_SEND_COMPLETE);
1637 }
1638
1639 spin_unlock_bh(&target->tx_lock);
1640
1641 if (tot_credits)
1642 htc_chk_ep_txq(target);
1643 }
1644
1645 static int htc_parse_trailer(struct htc_target *target,
1646 struct htc_record_hdr *record,
1647 u8 *record_buf, u32 *next_lk_ahds,
1648 enum htc_endpoint_id endpoint,
1649 int *n_lk_ahds)
1650 {
1651 struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
1652 struct htc_lookahead_report *lk_ahd;
1653 int len;
1654
1655 switch (record->rec_id) {
1656 case HTC_RECORD_CREDITS:
1657 len = record->len / sizeof(struct htc_credit_report);
1658 if (!len) {
1659 WARN_ON(1);
1660 return -EINVAL;
1661 }
1662
1663 htc_proc_cred_rpt(target,
1664 (struct htc_credit_report *) record_buf,
1665 len, endpoint);
1666 break;
1667 case HTC_RECORD_LOOKAHEAD:
1668 len = record->len / sizeof(*lk_ahd);
1669 if (!len) {
1670 WARN_ON(1);
1671 return -EINVAL;
1672 }
1673
1674 lk_ahd = (struct htc_lookahead_report *) record_buf;
1675 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
1676 && next_lk_ahds) {
1677
1678 ath6kl_dbg(ATH6KL_DBG_HTC,
1679 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
1680 lk_ahd->pre_valid, lk_ahd->post_valid);
1681
1682 /* look ahead bytes are valid, copy them over */
1683 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
1684
1685 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1686 "htc rx next look ahead",
1687 "", next_lk_ahds, 4);
1688
1689 *n_lk_ahds = 1;
1690 }
1691 break;
1692 case HTC_RECORD_LOOKAHEAD_BUNDLE:
1693 len = record->len / sizeof(*bundle_lkahd_rpt);
1694 if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
1695 WARN_ON(1);
1696 return -EINVAL;
1697 }
1698
1699 if (next_lk_ahds) {
1700 int i;
1701
1702 bundle_lkahd_rpt =
1703 (struct htc_bundle_lkahd_rpt *) record_buf;
1704
1705 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
1706 "", record_buf, record->len);
1707
1708 for (i = 0; i < len; i++) {
1709 memcpy((u8 *)&next_lk_ahds[i],
1710 bundle_lkahd_rpt->lk_ahd, 4);
1711 bundle_lkahd_rpt++;
1712 }
1713
1714 *n_lk_ahds = i;
1715 }
1716 break;
1717 default:
1718 ath6kl_err("unhandled record: id:%d len:%d\n",
1719 record->rec_id, record->len);
1720 break;
1721 }
1722
1723 return 0;
1724
1725 }
1726
1727 static int htc_proc_trailer(struct htc_target *target,
1728 u8 *buf, int len, u32 *next_lk_ahds,
1729 int *n_lk_ahds, enum htc_endpoint_id endpoint)
1730 {
1731 struct htc_record_hdr *record;
1732 int orig_len;
1733 int status;
1734 u8 *record_buf;
1735 u8 *orig_buf;
1736
1737 ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
1738 ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
1739
1740 orig_buf = buf;
1741 orig_len = len;
1742 status = 0;
1743
1744 while (len > 0) {
1745
1746 if (len < sizeof(struct htc_record_hdr)) {
1747 status = -ENOMEM;
1748 break;
1749 }
1750 /* these are byte aligned structs */
1751 record = (struct htc_record_hdr *) buf;
1752 len -= sizeof(struct htc_record_hdr);
1753 buf += sizeof(struct htc_record_hdr);
1754
1755 if (record->len > len) {
1756 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1757 record->len, record->rec_id, len);
1758 status = -ENOMEM;
1759 break;
1760 }
1761 record_buf = buf;
1762
1763 status = htc_parse_trailer(target, record, record_buf,
1764 next_lk_ahds, endpoint, n_lk_ahds);
1765
1766 if (status)
1767 break;
1768
1769 /* advance buffer past this record for next time around */
1770 buf += record->len;
1771 len -= record->len;
1772 }
1773
1774 if (status)
1775 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
1776 "", orig_buf, orig_len);
1777
1778 return status;
1779 }
1780
1781 static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
1782 struct htc_packet *packet,
1783 u32 *next_lkahds, int *n_lkahds)
1784 {
1785 int status = 0;
1786 u16 payload_len;
1787 u32 lk_ahd;
1788 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
1789
1790 if (n_lkahds != NULL)
1791 *n_lkahds = 0;
1792
1793 /*
1794 * NOTE: we cannot assume the alignment of buf, so we use the safe
1795 * macros to retrieve 16 bit fields.
1796 */
1797 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1798
1799 memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
1800
1801 if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
1802 /*
1803 * Refresh the expected header and the actual length as it
1804 * was unknown when this packet was grabbed as part of the
1805 * bundle.
1806 */
1807 packet->info.rx.exp_hdr = lk_ahd;
1808 packet->act_len = payload_len + HTC_HDR_LENGTH;
1809
1810 /* validate the actual header that was refreshed */
1811 if (packet->act_len > packet->buf_len) {
1812 ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
1813 payload_len, lk_ahd);
1814 /*
1815 * Limit this to max buffer just to print out some
1816 * of the buffer.
1817 */
1818 packet->act_len = min(packet->act_len, packet->buf_len);
1819 status = -ENOMEM;
1820 goto fail_rx;
1821 }
1822
1823 if (packet->endpoint != htc_hdr->eid) {
1824 ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
1825 htc_hdr->eid, packet->endpoint);
1826 status = -ENOMEM;
1827 goto fail_rx;
1828 }
1829 }
1830
1831 if (lk_ahd != packet->info.rx.exp_hdr) {
1832 ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1833 __func__, packet, packet->info.rx.rx_flags);
1834 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
1835 "", &packet->info.rx.exp_hdr, 4);
1836 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
1837 "", (u8 *)&lk_ahd, sizeof(lk_ahd));
1838 status = -ENOMEM;
1839 goto fail_rx;
1840 }
1841
1842 if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
1843 if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
1844 htc_hdr->ctrl[0] > payload_len) {
1845 ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
1846 __func__, payload_len, htc_hdr->ctrl[0]);
1847 status = -ENOMEM;
1848 goto fail_rx;
1849 }
1850
1851 if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
1852 next_lkahds = NULL;
1853 n_lkahds = NULL;
1854 }
1855
1856 status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
1857 + payload_len - htc_hdr->ctrl[0],
1858 htc_hdr->ctrl[0], next_lkahds,
1859 n_lkahds, packet->endpoint);
1860
1861 if (status)
1862 goto fail_rx;
1863
1864 packet->act_len -= htc_hdr->ctrl[0];
1865 }
1866
1867 packet->buf += HTC_HDR_LENGTH;
1868 packet->act_len -= HTC_HDR_LENGTH;
1869
1870 fail_rx:
1871 if (status)
1872 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
1873 "", packet->buf, packet->act_len);
1874
1875 return status;
1876 }
1877
1878 static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1879 struct htc_packet *packet)
1880 {
1881 ath6kl_dbg(ATH6KL_DBG_HTC,
1882 "htc rx complete ep %d packet 0x%p\n",
1883 endpoint->eid, packet);
1884 endpoint->ep_cb.rx(endpoint->target, packet);
1885 }
1886
1887 static int ath6kl_htc_rx_bundle(struct htc_target *target,
1888 struct list_head *rxq,
1889 struct list_head *sync_compq,
1890 int *n_pkt_fetched, bool part_bundle)
1891 {
1892 struct hif_scatter_req *scat_req;
1893 struct htc_packet *packet;
1894 int rem_space = target->max_rx_bndl_sz;
1895 int n_scat_pkt, status = 0, i, len;
1896
1897 n_scat_pkt = get_queue_depth(rxq);
1898 n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
1899
1900 if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
1901 /*
1902 * We were forced to split this bundle receive operation;
1903 * all packets in this partial bundle must have their
1904 * lookaheads ignored.
1905 */
1906 part_bundle = true;
1907
1908 /*
1909 * This would only happen if the target ignored our max
1910 * bundle limit.
1911 */
1912 ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
1913 __func__, get_queue_depth(rxq), n_scat_pkt);
1914 }
1915
1916 len = 0;
1917
1918 ath6kl_dbg(ATH6KL_DBG_HTC,
1919 "htc rx bundle depth %d pkts %d\n",
1920 get_queue_depth(rxq), n_scat_pkt);
1921
1922 scat_req = hif_scatter_req_get(target->dev->ar);
1923
1924 if (scat_req == NULL)
1925 goto fail_rx_pkt;
1926
1927 for (i = 0; i < n_scat_pkt; i++) {
1928 int pad_len;
1929
1930 packet = list_first_entry(rxq, struct htc_packet, list);
1931 list_del(&packet->list);
1932
1933 pad_len = CALC_TXRX_PADDED_LEN(target,
1934 packet->act_len);
1935
1936 if ((rem_space - pad_len) < 0) {
1937 list_add(&packet->list, rxq);
1938 break;
1939 }
1940
1941 rem_space -= pad_len;
1942
1943 if (part_bundle || (i < (n_scat_pkt - 1)))
1944 /*
1945 * Packets 0..n-1 cannot be checked for look-aheads
1946 * since we are fetching a bundle; the last packet,
1947 * however, can have its lookahead used
1948 */
1949 packet->info.rx.rx_flags |=
1950 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1951
1952 /* NOTE: 1 HTC packet per scatter entry */
1953 scat_req->scat_list[i].buf = packet->buf;
1954 scat_req->scat_list[i].len = pad_len;
1955
1956 packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
1957
1958 list_add_tail(&packet->list, sync_compq);
1959
1960 WARN_ON(!scat_req->scat_list[i].len);
1961 len += scat_req->scat_list[i].len;
1962 }
1963
1964 scat_req->len = len;
1965 scat_req->scat_entries = i;
1966
1967 status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
1968
1969 if (!status)
1970 *n_pkt_fetched = i;
1971
1972 /* free scatter request */
1973 hif_scatter_req_add(target->dev->ar, scat_req);
1974
1975 fail_rx_pkt:
1976
1977 return status;
1978 }
1979
1980 static int ath6kl_htc_rx_process_packets(struct htc_target *target,
1981 struct list_head *comp_pktq,
1982 u32 lk_ahds[],
1983 int *n_lk_ahd)
1984 {
1985 struct htc_packet *packet, *tmp_pkt;
1986 struct htc_endpoint *ep;
1987 int status = 0;
1988
1989 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
1990 ep = &target->endpoint[packet->endpoint];
1991
1992 /* process header for each of the recv packet */
1993 status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
1994 n_lk_ahd);
1995 if (status)
1996 return status;
1997
1998 list_del(&packet->list);
1999
2000 if (list_empty(comp_pktq)) {
2001 /*
2002 * Last packet's more packet flag is set
2003 * based on the lookahead.
2004 */
2005 if (*n_lk_ahd > 0)
2006 ath6kl_htc_rx_set_indicate(lk_ahds[0],
2007 ep, packet);
2008 } else
2009 /*
2010 * Packets in a bundle automatically have
2011 * this flag set.
2012 */
2013 packet->info.rx.indicat_flags |=
2014 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
2015
2016 ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
2017
2018 if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
2019 ep->ep_st.rx_bundl += 1;
2020
2021 ath6kl_htc_rx_complete(ep, packet);
2022 }
2023
2024 return status;
2025 }
2026
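/*
 * Fetch all packets queued on rx_pktq from the target, using bundled
 * scatter requests when receive bundling is enabled and falling back to
 * one synchronous read per packet otherwise. Fetched packets are moved
 * to comp_pktq; on failure any unused buffers are reclaimed.
 */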
2027 static int ath6kl_htc_rx_fetch(struct htc_target *target,
2028 struct list_head *rx_pktq,
2029 struct list_head *comp_pktq)
2030 {
2031 int fetched_pkts;
2032 bool part_bundle = false;
2033 int status = 0;
2034 struct list_head tmp_rxq;
2035 struct htc_packet *packet, *tmp_pkt;
2036
2037 /* now go fetch the list of HTC packets */
2038 while (!list_empty(rx_pktq)) {
2039 fetched_pkts = 0;
2040
2041 INIT_LIST_HEAD(&tmp_rxq);
2042
2043 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
2044 /*
2045 * There are enough packets to attempt a
2046 * bundle transfer and recv bundling is
2047 * allowed.
2048 */
2049 status = ath6kl_htc_rx_bundle(target, rx_pktq,
2050 &tmp_rxq,
2051 &fetched_pkts,
2052 part_bundle);
2053 if (status)
2054 goto fail_rx;
2055
2056 if (!list_empty(rx_pktq))
2057 part_bundle = true;
2058
2059 list_splice_tail_init(&tmp_rxq, comp_pktq);
2060 }
2061
2062 if (!fetched_pkts) {
2063
2064 packet = list_first_entry(rx_pktq, struct htc_packet,
2065 list);
2066
2067 /* fully synchronous */
2068 packet->completion = NULL;
2069
2070 if (!list_is_singular(rx_pktq))
2071 /*
2072 * look_aheads in all packets
2073 * except the last one in the
2074 * bundle must be ignored
2075 */
2076 packet->info.rx.rx_flags |=
2077 HTC_RX_PKT_IGNORE_LOOKAHEAD;
2078
2079 /* go fetch the packet */
2080 status = ath6kl_htc_rx_packet(target, packet,
2081 packet->act_len);
2082
2083 list_move_tail(&packet->list, &tmp_rxq);
2084
2085 if (status)
2086 goto fail_rx;
2087
2088 list_splice_tail_init(&tmp_rxq, comp_pktq);
2089 }
2090 }
2091
2092 return 0;
2093
2094 fail_rx:
2095
2096 /*
2097 * Clean up any packets we allocated but did not
2098 * actually use to fetch anything.
2099 */
2100
2101 list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
2102 list_del(&packet->list);
2103 htc_reclaim_rxbuf(target, packet,
2104 &target->endpoint[packet->endpoint]);
2105 }
2106
2107 list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
2108 list_del(&packet->list);
2109 htc_reclaim_rxbuf(target, packet,
2110 &target->endpoint[packet->endpoint]);
2111 }
2112
2113 return status;
2114 }
2115
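/*
 * Main receive handler, invoked when the target reports pending receive
 * messages. The initial lookahead seeds the loop; packets are allocated,
 * fetched and processed until no further lookahead is reported or an
 * error occurs. The number of fetched packets is returned in num_pkts.
 */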
2116 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
2117 u32 msg_look_ahead, int *num_pkts)
2118 {
2119 struct htc_packet *packets, *tmp_pkt;
2120 struct htc_endpoint *endpoint;
2121 struct list_head rx_pktq, comp_pktq;
2122 int status = 0;
2123 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
2124 int num_look_ahead = 1;
2125 enum htc_endpoint_id id;
2126 int n_fetched = 0;
2127
2128 INIT_LIST_HEAD(&comp_pktq);
2129 *num_pkts = 0;
2130
2131 /*
2132 * On first entry, copy the look_aheads into our temp array
2133 * for processing.
2134 */
2135 look_aheads[0] = msg_look_ahead;
2136
2137 while (true) {
2138
2139 /*
2140 * First lookahead sets the expected endpoint IDs for all
2141 * packets in a bundle.
2142 */
2143 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
2144 
2145 if (id >= ENDPOINT_MAX) {
2146 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
2147 id);
2148 status = -ENOMEM;
2149 break;
2150 }
2151 endpoint = &target->endpoint[id];
2152
2153 INIT_LIST_HEAD(&rx_pktq);
2154 INIT_LIST_HEAD(&comp_pktq);
2155
2156 /*
2157 * Try to allocate as many HTC RX packets as indicated by
2158 * the look_aheads.
2159 */
2160 status = ath6kl_htc_rx_alloc(target, look_aheads,
2161 num_look_ahead, endpoint,
2162 &rx_pktq);
2163 if (status)
2164 break;
2165
2166 if (get_queue_depth(&rx_pktq) >= 2)
2167 /*
2168 * A recv bundle was detected; force an IRQ status
2169 * re-check.
2170 */
2171 target->chk_irq_status_cnt = 1;
2172
2173 n_fetched += get_queue_depth(&rx_pktq);
2174
2175 num_look_ahead = 0;
2176
2177 status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
2178
2179 if (!status)
2180 ath6kl_htc_rx_chk_water_mark(endpoint);
2181
2182 /* Process fetched packets */
2183 status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
2184 look_aheads,
2185 &num_look_ahead);
2186
2187 if (!num_look_ahead || status)
2188 break;
2189
2190 /*
2191 * For SYNCH processing, if we get here, we are running
2192 * through the loop again due to a detected lookahead. Set
2193 * a flag so we re-check the IRQ status registers before
2194 * leaving IRQ processing; this can net better performance
2195 * in high throughput situations.
2196 */
2197 target->chk_irq_status_cnt = 1;
2198 }
2199
2200 if (status) {
2201 ath6kl_err("failed to get pending recv messages: %d\n",
2202 status);
2203
2204 /* cleanup any packets in sync completion queue */
2205 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
2206 list_del(&packets->list);
2207 htc_reclaim_rxbuf(target, packets,
2208 &target->endpoint[packets->endpoint]);
2209 }
2210
2211 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2212 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
2213 ath6kl_hif_rx_control(target->dev, false);
2214 }
2215 }
2216
2217 /*
2218 * Before leaving, check to see if host ran out of buffers and
2219 * needs to stop the receiver.
2220 */
2221 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2222 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
2223 ath6kl_hif_rx_control(target->dev, false);
2224 }
2225 *num_pkts = n_fetched;
2226
2227 return status;
2228 }
2229
2230 /*
2231 * Synchronously wait for a control message from the target.
2232 * This function is used at initialization time ONLY; at init,
2233 * only messages on ENDPOINT 0 are expected.
2234 */
2235 static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
2236 {
2237 struct htc_packet *packet = NULL;
2238 struct htc_frame_hdr *htc_hdr;
2239 u32 look_ahead;
2240
2241 if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
2242 HTC_TARGET_RESPONSE_TIMEOUT))
2243 return NULL;
2244
2245 ath6kl_dbg(ATH6KL_DBG_HTC,
2246 "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
2247
2248 htc_hdr = (struct htc_frame_hdr *)&look_ahead;
2249
2250 if (htc_hdr->eid != ENDPOINT_0)
2251 return NULL;
2252
2253 packet = htc_get_control_buf(target, false);
2254
2255 if (!packet)
2256 return NULL;
2257
2258 packet->info.rx.rx_flags = 0;
2259 packet->info.rx.exp_hdr = look_ahead;
2260 packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
2261
2262 if (packet->act_len > packet->buf_len)
2263 goto fail_ctrl_rx;
2264
2265 /* we want synchronous operation */
2266 packet->completion = NULL;
2267
2268 /* get the message from the device, this will block */
2269 if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
2270 goto fail_ctrl_rx;
2271
2272 /* process receive header */
2273 packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
2274
2275 if (packet->status) {
2276 ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
2277 packet->status);
2278 goto fail_ctrl_rx;
2279 }
2280
2281 return packet;
2282
2283 fail_ctrl_rx:
2284 if (packet != NULL) {
2285 htc_rxpkt_reset(packet);
2286 reclaim_rx_ctrl_buf(target, packet);
2287 }
2288
2289 return NULL;
2290 }
2291
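/*
 * Queue caller supplied receive buffers on an endpoint's rx_bufq and,
 * if the receiver was blocked waiting for buffers on that endpoint,
 * re-enable reception.
 */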
2292 int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
2293 struct list_head *pkt_queue)
2294 {
2295 struct htc_endpoint *endpoint;
2296 struct htc_packet *first_pkt;
2297 bool rx_unblock = false;
2298 int status = 0, depth;
2299
2300 if (list_empty(pkt_queue))
2301 return -ENOMEM;
2302
2303 first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
2304
2305 if (first_pkt->endpoint >= ENDPOINT_MAX)
2306 return status;
2307
2308 depth = get_queue_depth(pkt_queue);
2309
2310 ath6kl_dbg(ATH6KL_DBG_HTC,
2311 "htc rx add multiple ep id %d cnt %d len %d\n",
2312 first_pkt->endpoint, depth, first_pkt->buf_len);
2313
2314 endpoint = &target->endpoint[first_pkt->endpoint];
2315
2316 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
2317 struct htc_packet *packet, *tmp_pkt;
2318
2319 /* walk through queue and mark each one canceled */
2320 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
2321 packet->status = -ECANCELED;
2322 list_del(&packet->list);
2323 ath6kl_htc_rx_complete(endpoint, packet);
2324 }
2325
2326 return status;
2327 }
2328
2329 spin_lock_bh(&target->rx_lock);
2330
2331 list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
2332
2333 /* check if we are blocked waiting for a new buffer */
2334 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
2335 if (target->ep_waiting == first_pkt->endpoint) {
2336 ath6kl_dbg(ATH6KL_DBG_HTC,
2337 "htc rx blocked on ep %d, unblocking\n",
2338 target->ep_waiting);
2339 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
2340 target->ep_waiting = ENDPOINT_MAX;
2341 rx_unblock = true;
2342 }
2343 }
2344
2345 spin_unlock_bh(&target->rx_lock);
2346
2347 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
2348 /* TODO : implement a buffer threshold count? */
2349 ath6kl_hif_rx_control(target->dev, true);
2350
2351 return status;
2352 }
2353
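/*
 * Free every receive buffer still queued on any endpoint; see the
 * comment in the loop below for how endpoint 0 buffers differ from
 * the skb based buffers of the other endpoints.
 */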
2354 void ath6kl_htc_flush_rx_buf(struct htc_target *target)
2355 {
2356 struct htc_endpoint *endpoint;
2357 struct htc_packet *packet, *tmp_pkt;
2358 int i;
2359
2360 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2361 endpoint = &target->endpoint[i];
2362 if (!endpoint->svc_id)
2363 /* not in use.. */
2364 continue;
2365
2366 spin_lock_bh(&target->rx_lock);
2367 list_for_each_entry_safe(packet, tmp_pkt,
2368 &endpoint->rx_bufq, list) {
2369 list_del(&packet->list);
2370 spin_unlock_bh(&target->rx_lock);
2371 ath6kl_dbg(ATH6KL_DBG_HTC,
2372 "htc rx flush pkt 0x%p len %d ep %d\n",
2373 packet, packet->buf_len,
2374 packet->endpoint);
2375 /*
2376 * Packets in the rx_bufq of endpoint 0 were originally
2377 * queued from target->free_ctrl_rxbuf, where the packet
2378 * and packet->buf_start are allocated separately with
2379 * kmalloc(). For the rx_bufq of other endpoints, the
2380 * buffer is allocated as an skb and the packet is
2381 * skb->head. Take care of this difference when freeing
2382 * the memory.
2383 */
2384 if (packet->endpoint == ENDPOINT_0) {
2385 kfree(packet->buf_start);
2386 kfree(packet);
2387 } else {
2388 dev_kfree_skb(packet->pkt_cntxt);
2389 }
2390 spin_lock_bh(&target->rx_lock);
2391 }
2392 spin_unlock_bh(&target->rx_lock);
2393 }
2394 }
2395
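/*
 * Connect a service to the target: send a connect request on endpoint 0
 * (except for the pseudo control service), synchronously wait for the
 * response and initialize the endpoint assigned by the target.
 */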
2396 int ath6kl_htc_conn_service(struct htc_target *target,
2397 struct htc_service_connect_req *conn_req,
2398 struct htc_service_connect_resp *conn_resp)
2399 {
2400 struct htc_packet *rx_pkt = NULL;
2401 struct htc_packet *tx_pkt = NULL;
2402 struct htc_conn_service_resp *resp_msg;
2403 struct htc_conn_service_msg *conn_msg;
2404 struct htc_endpoint *endpoint;
2405 enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
2406 unsigned int max_msg_sz = 0;
2407 int status = 0;
2408
2409 ath6kl_dbg(ATH6KL_DBG_HTC,
2410 "htc connect service target 0x%p service id 0x%x\n",
2411 target, conn_req->svc_id);
2412
2413 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
2414 /* special case for pseudo control service */
2415 assigned_ep = ENDPOINT_0;
2416 max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
2417 } else {
2418 /* allocate a packet to send to the target */
2419 tx_pkt = htc_get_control_buf(target, true);
2420
2421 if (!tx_pkt)
2422 return -ENOMEM;
2423
2424 conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
2425 memset(conn_msg, 0, sizeof(*conn_msg));
2426 conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
2427 conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
2428 conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
2429
2430 set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
2431 sizeof(*conn_msg) + conn_msg->svc_meta_len,
2432 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
2433
2434 /* we want synchronous operation */
2435 tx_pkt->completion = NULL;
2436 ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0);
2437 status = ath6kl_htc_tx_issue(target, tx_pkt);
2438
2439 if (status)
2440 goto fail_tx;
2441
2442 /* wait for response */
2443 rx_pkt = htc_wait_for_ctrl_msg(target);
2444
2445 if (!rx_pkt) {
2446 status = -ENOMEM;
2447 goto fail_tx;
2448 }
2449
2450 resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
2451
2452 if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
2453 || (rx_pkt->act_len < sizeof(*resp_msg))) {
2454 status = -ENOMEM;
2455 goto fail_tx;
2456 }
2457
2458 conn_resp->resp_code = resp_msg->status;
2459 /* check response status */
2460 if (resp_msg->status != HTC_SERVICE_SUCCESS) {
2461 ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
2462 resp_msg->svc_id, resp_msg->status);
2463 status = -ENOMEM;
2464 goto fail_tx;
2465 }
2466
2467 assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
2468 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
2469 }
2470
2471 if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
2472 status = -ENOMEM;
2473 goto fail_tx;
2474 }
2475
2476 endpoint = &target->endpoint[assigned_ep];
2477 endpoint->eid = assigned_ep;
2478 if (endpoint->svc_id) {
2479 status = -ENOMEM;
2480 goto fail_tx;
2481 }
2482
2483 /* return assigned endpoint to caller */
2484 conn_resp->endpoint = assigned_ep;
2485 conn_resp->len_max = max_msg_sz;
2486
2487 /* setup the endpoint */
2488
2489 /* this marks the endpoint in use */
2490 endpoint->svc_id = conn_req->svc_id;
2491
2492 endpoint->max_txq_depth = conn_req->max_txq_depth;
2493 endpoint->len_max = max_msg_sz;
2494 endpoint->ep_cb = conn_req->ep_cb;
2495 endpoint->cred_dist.svc_id = conn_req->svc_id;
2496 endpoint->cred_dist.htc_ep = endpoint;
2497 endpoint->cred_dist.endpoint = assigned_ep;
2498 endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
2499
2500 switch (endpoint->svc_id) {
2501 case WMI_DATA_BK_SVC:
2502 endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
2503 break;
2504 default:
2505 endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
2506 break;
2507 }
2508
2509 if (conn_req->max_rxmsg_sz) {
2510 /*
2511 * Override the cred_per_msg calculation; this optimizes
2512 * the credit-low indications since the host will actually
2513 * issue smaller messages in the send path.
2514 */
2515 if (conn_req->max_rxmsg_sz > max_msg_sz) {
2516 status = -ENOMEM;
2517 goto fail_tx;
2518 }
2519 endpoint->cred_dist.cred_per_msg =
2520 conn_req->max_rxmsg_sz / target->tgt_cred_sz;
2521 } else
2522 endpoint->cred_dist.cred_per_msg =
2523 max_msg_sz / target->tgt_cred_sz;
2524
2525 if (!endpoint->cred_dist.cred_per_msg)
2526 endpoint->cred_dist.cred_per_msg = 1;
2527
2528 /* save local connection flags */
2529 endpoint->conn_flags = conn_req->flags;
2530
2531 fail_tx:
2532 if (tx_pkt)
2533 htc_reclaim_txctrl_buf(target, tx_pkt);
2534
2535 if (rx_pkt) {
2536 htc_rxpkt_reset(rx_pkt);
2537 reclaim_rx_ctrl_buf(target, rx_pkt);
2538 }
2539
2540 return status;
2541 }
2542
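/* Re-initialize all endpoint structures and the credit distribution list. */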
2543 static void reset_ep_state(struct htc_target *target)
2544 {
2545 struct htc_endpoint *endpoint;
2546 int i;
2547
2548 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2549 endpoint = &target->endpoint[i];
2550 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2551 endpoint->svc_id = 0;
2552 endpoint->len_max = 0;
2553 endpoint->max_txq_depth = 0;
2554 memset(&endpoint->ep_st, 0,
2555 sizeof(endpoint->ep_st));
2556 INIT_LIST_HEAD(&endpoint->rx_bufq);
2557 INIT_LIST_HEAD(&endpoint->txq);
2558 endpoint->target = target;
2559 }
2560
2561 /* reset distribution list */
2562 /* FIXME: free existing entries */
2563 INIT_LIST_HEAD(&target->cred_dist_list);
2564 }
2565
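/* Return the number of receive buffers currently queued on an endpoint. */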
2566 int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
2567 enum htc_endpoint_id endpoint)
2568 {
2569 int num;
2570
2571 spin_lock_bh(&target->rx_lock);
2572 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2573 spin_unlock_bh(&target->rx_lock);
2574 return num;
2575 }
2576
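/*
 * Configure the HTC message bundling limits from what HIF scatter
 * support and the extended mailbox can handle, and disable send
 * bundling when the credit size is not block aligned.
 */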
2577 static void htc_setup_msg_bndl(struct htc_target *target)
2578 {
2579 /* limit what HTC can handle */
2580 target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
2581 target->msg_per_bndl_max);
2582
2583 if (ath6kl_hif_enable_scatter(target->dev->ar)) {
2584 target->msg_per_bndl_max = 0;
2585 return;
2586 }
2587
2588 /* limit the bundle to what the device layer can handle */
2589 target->msg_per_bndl_max = min(target->max_scat_entries,
2590 target->msg_per_bndl_max);
2591
2592 ath6kl_dbg(ATH6KL_DBG_BOOT,
2593 "htc bundling allowed msg_per_bndl_max %d\n",
2594 target->msg_per_bndl_max);
2595
2596 /* Max rx bundle size is limited by the max tx bundle size */
2597 target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
2598 /* Max tx bundle size is limited by the extended mbox address range */
2599 target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2600 target->max_xfer_szper_scatreq);
2601
2602 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
2603 target->max_rx_bndl_sz, target->max_tx_bndl_sz);
2604
2605 if (target->max_tx_bndl_sz)
2606 /* tx_bndl_mask has one enable bit per AC */
2607 target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
2608
2609 if (target->max_rx_bndl_sz)
2610 target->rx_bndl_enable = true;
2611
2612 if ((target->tgt_cred_sz % target->block_sz) != 0) {
2613 ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2614 target->tgt_cred_sz);
2615
2616 /*
2617 * Disallow send bundling since the credit size is
2618 * not aligned to a block size; the I/O block
2619 * padding would spill into the next credit buffer,
2620 * which is fatal.
2621 */
2622 target->tx_bndl_mask = 0;
2623 }
2624 }
2625
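/*
 * Wait for the HTC ready message from the target, record the credit
 * configuration, set up message bundling and connect the pseudo HTC
 * control service on endpoint 0.
 */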
2626 int ath6kl_htc_wait_target(struct htc_target *target)
2627 {
2628 struct htc_packet *packet = NULL;
2629 struct htc_ready_ext_msg *rdy_msg;
2630 struct htc_service_connect_req connect;
2631 struct htc_service_connect_resp resp;
2632 int status;
2633
2634 /* FIXME: remove once USB support is implemented */
2635 if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
2636 ath6kl_err("HTC doesn't support USB yet. Patience!\n");
2637 return -EOPNOTSUPP;
2638 }
2639
2640 /* we should be getting 1 control message that the target is ready */
2641 packet = htc_wait_for_ctrl_msg(target);
2642
2643 if (!packet)
2644 return -ENOMEM;
2645
2646 /* we controlled the buffer creation so it's properly aligned */
2647 rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
2648
2649 if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
2650 (packet->act_len < sizeof(struct htc_ready_msg))) {
2651 status = -ENOMEM;
2652 goto fail_wait_target;
2653 }
2654
2655 if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
2656 status = -ENOMEM;
2657 goto fail_wait_target;
2658 }
2659
2660 target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
2661 target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
2662
2663 ath6kl_dbg(ATH6KL_DBG_BOOT,
2664 "htc target ready credits %d size %d\n",
2665 target->tgt_creds, target->tgt_cred_sz);
2666
2667 /* check if this is an extended ready message */
2668 if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
2669 /* this is an extended message */
2670 target->htc_tgt_ver = rdy_msg->htc_ver;
2671 target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
2672 } else {
2673 /* legacy */
2674 target->htc_tgt_ver = HTC_VERSION_2P0;
2675 target->msg_per_bndl_max = 0;
2676 }
2677
2678 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
2679 (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
2680 target->htc_tgt_ver);
2681
2682 if (target->msg_per_bndl_max > 0)
2683 htc_setup_msg_bndl(target);
2684
2685 /* setup our pseudo HTC control endpoint connection */
2686 memset(&connect, 0, sizeof(connect));
2687 memset(&resp, 0, sizeof(resp));
2688 connect.ep_cb.rx = htc_ctrl_rx;
2689 connect.ep_cb.rx_refill = NULL;
2690 connect.ep_cb.tx_full = NULL;
2691 connect.max_txq_depth = NUM_CONTROL_BUFFERS;
2692 connect.svc_id = HTC_CTRL_RSVD_SVC;
2693
2694 /* connect fake service */
2695 status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
2696
2697 if (status)
2698 /*
2699 * FIXME: this call doesn't make sense; the caller should
2700 * call ath6kl_htc_cleanup() when it wants to remove htc.
2701 */
2702 ath6kl_hif_cleanup_scatter(target->dev->ar);
2703
2704 fail_wait_target:
2705 if (packet) {
2706 htc_rxpkt_reset(packet);
2707 reclaim_rx_ctrl_buf(target, packet);
2708 }
2709
2710 return status;
2711 }
2712
2713 /*
2714 * Start HTC, enable interrupts and let the target know
2715 * host has finished setup.
2716 */
2717 int ath6kl_htc_start(struct htc_target *target)
2718 {
2719 struct htc_packet *packet;
2720 int status;
2721
2722 memset(&target->dev->irq_proc_reg, 0,
2723 sizeof(target->dev->irq_proc_reg));
2724
2725 /* Disable interrupts at the chip level */
2726 ath6kl_hif_disable_intrs(target->dev);
2727
2728 target->htc_flags = 0;
2729 target->rx_st_flags = 0;
2730
2731 /* Push control receive buffers into htc control endpoint */
2732 while ((packet = htc_get_control_buf(target, false)) != NULL) {
2733 status = htc_add_rxbuf(target, packet);
2734 if (status)
2735 return status;
2736 }
2737
2738 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2739 ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
2740 target->tgt_creds);
2741
2742 dump_cred_dist_stats(target);
2743
2744 /* Indicate setup completion to the target */
2745 status = htc_setup_tx_complete(target);
2746
2747 if (status)
2748 return status;
2749
2750 /* unmask interrupts */
2751 status = ath6kl_hif_unmask_intrs(target->dev);
2752
2753 if (status)
2754 ath6kl_htc_stop(target);
2755
2756 return status;
2757 }
2758
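/*
 * Reset all endpoint state and allocate the control buffers used for
 * endpoint 0 transmit and receive.
 */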
2759 static int ath6kl_htc_reset(struct htc_target *target)
2760 {
2761 u32 block_size, ctrl_bufsz;
2762 struct htc_packet *packet;
2763 int i;
2764
2765 reset_ep_state(target);
2766
2767 block_size = target->dev->ar->mbox_info.block_size;
2768
2769 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2770 (block_size + HTC_HDR_LENGTH) :
2771 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2772
2773 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2774 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2775 if (!packet)
2776 return -ENOMEM;
2777
2778 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2779 if (!packet->buf_start) {
2780 kfree(packet);
2781 return -ENOMEM;
2782 }
2783
2784 packet->buf_len = ctrl_bufsz;
2785 if (i < NUM_CONTROL_RX_BUFFERS) {
2786 packet->act_len = 0;
2787 packet->buf = packet->buf_start;
2788 packet->endpoint = ENDPOINT_0;
2789 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2790 } else
2791 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2792 }
2793
2794 return 0;
2795 }
2796
2797 /* htc_stop: stop interrupt reception, and flush all queued buffers */
2798 void ath6kl_htc_stop(struct htc_target *target)
2799 {
2800 spin_lock_bh(&target->htc_lock);
2801 target->htc_flags |= HTC_OP_STATE_STOPPING;
2802 spin_unlock_bh(&target->htc_lock);
2803
2804 /*
2805 * Masking interrupts is a synchronous operation, when this
2806 * function returns all pending HIF I/O has completed, we can
2807 * safely flush the queues.
2808 */
2809 ath6kl_hif_mask_intrs(target->dev);
2810
2811 ath6kl_htc_flush_txep_all(target);
2812
2813 ath6kl_htc_flush_rx_buf(target);
2814
2815 ath6kl_htc_reset(target);
2816 }
2817
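/*
 * Allocate and initialize an HTC target instance and its HIF device
 * context; returns NULL on failure.
 */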
2818 void *ath6kl_htc_create(struct ath6kl *ar)
2819 {
2820 struct htc_target *target = NULL;
2821 int status = 0;
2822
2823 target = kzalloc(sizeof(*target), GFP_KERNEL);
2824 if (!target) {
2825 ath6kl_err("unable to allocate memory\n");
2826 return NULL;
2827 }
2828
2829 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2830 if (!target->dev) {
2831 ath6kl_err("unable to allocate memory\n");
2832 status = -ENOMEM;
2833 goto err_htc_cleanup;
2834 }
2835
2836 spin_lock_init(&target->htc_lock);
2837 spin_lock_init(&target->rx_lock);
2838 spin_lock_init(&target->tx_lock);
2839
2840 INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2841 INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2842 INIT_LIST_HEAD(&target->cred_dist_list);
2843
2844 target->dev->ar = ar;
2845 target->dev->htc_cnxt = target;
2846 target->ep_waiting = ENDPOINT_MAX;
2847
2848 status = ath6kl_hif_setup(target->dev);
2849 if (status)
2850 goto err_htc_cleanup;
2851
2852 status = ath6kl_htc_reset(target);
2853 if (status)
2854 goto err_htc_cleanup;
2855
2856 return target;
2857
2858 err_htc_cleanup:
2859 ath6kl_htc_cleanup(target);
2860
2861 return NULL;
2862 }
2863
2864 /* cleanup the HTC instance */
2865 void ath6kl_htc_cleanup(struct htc_target *target)
2866 {
2867 struct htc_packet *packet, *tmp_packet;
2868
2869 /* FIXME: remove check once USB support is implemented */
2870 if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
2871 ath6kl_hif_cleanup_scatter(target->dev->ar);
2872
2873 list_for_each_entry_safe(packet, tmp_packet,
2874 &target->free_ctrl_txbuf, list) {
2875 list_del(&packet->list);
2876 kfree(packet->buf_start);
2877 kfree(packet);
2878 }
2879
2880 list_for_each_entry_safe(packet, tmp_packet,
2881 &target->free_ctrl_rxbuf, list) {
2882 list_del(&packet->list);
2883 kfree(packet->buf_start);
2884 kfree(packet);
2885 }
2886
2887 kfree(target->dev);
2888 kfree(target);
2889 }