/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"

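/*
 * Poll the mailbox credit-decrement counter until at least one BMI command
 * credit is available or BMI_COMMUNICATION_TIMEOUT expires.
 */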
static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
{
	u32 addr;
	unsigned long timeout;
	int ret;

	ar->bmi.cmd_credits = 0;

	/* Read the counter register to get the command credits */
	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {

		/*
		 * Hit the credit counter with a 4-byte access, the first byte
		 * read will hit the counter and cause a decrement, while the
		 * remaining 3 bytes have no effect. The rationale behind this
		 * is to make all HIF accesses 4-byte aligned.
		 */
		ret = hif_read_write_sync(ar, addr,
					  (u8 *)&ar->bmi.cmd_credits, 4,
					  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
			return ret;
		}

		/* The counter is only 8 bits.
		 * Ignore anything in the upper 3 bytes
		 */
		ar->bmi.cmd_credits &= 0xFF;
	}

	if (!ar->bmi.cmd_credits) {
		ath6kl_err("bmi communication timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

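/*
 * Wait until the target reports valid RX lookahead data for the BMI
 * endpoint, i.e. until a BMI response is ready to be read from the mailbox.
 */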
static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
{
	unsigned long timeout;
	u32 rx_word = 0;
	int ret = 0;

	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
	while (time_before(jiffies, timeout) && !rx_word) {
		ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
					  (u8 *)&rx_word, sizeof(rx_word),
					  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
			return ret;
		}

		/* all we really want is one bit */
		rx_word &= (1 << ENDPOINT1);
	}

	if (!rx_word) {
		ath6kl_err("bmi_recv_buf FIFO empty\n");
		return -EINVAL;
	}

	return ret;
}

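/*
 * Send a BMI command buffer: wait for a command credit, then write the
 * buffer to the target's HTC mailbox address.
 */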
static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	ret = ath6kl_get_bmi_cmd_credits(ar);
	if (ret)
		return ret;

	addr = ar->mbox_info.htc_addr;

	ret = hif_read_write_sync(ar, addr, buf, len,
				  HIF_WR_SYNC_BYTE_INC);
	if (ret)
		ath6kl_err("unable to send the bmi data to the device\n");

	return ret;
}

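/*
 * Receive a BMI response: wait until the target signals that response data
 * is available (see the synchronization notes below), then read it from the
 * mailbox.
 */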
static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
{
	int ret;
	u32 addr;

	/*
	 * During normal bootup, small reads may be required.
	 * Rather than issue an HIF Read and then wait as the Target
	 * adds successive bytes to the FIFO, we wait here until
	 * we know that response data is available.
	 *
	 * This allows us to cleanly timeout on an unexpected
	 * Target failure rather than risk problems at the HIF level.
	 * In particular, this avoids SDIO timeouts and possibly garbage
	 * data on some host controllers. And on an interconnect
	 * such as Compact Flash (as well as some SDIO masters) which
	 * does not provide any indication on data timeout, it avoids
	 * a potential hang or garbage response.
	 *
	 * Synchronization is more difficult for reads larger than the
	 * size of the MBOX FIFO (128B), because the Target is unable
	 * to push the 129th byte of data until AFTER the Host posts an
	 * HIF Read and removes some FIFO data. So for large reads the
	 * Host proceeds to post an HIF Read BEFORE all the data is
	 * actually available to read. Fortunately, large BMI reads do
	 * not occur in practice -- they're supported for debug/development.
	 *
	 * So Host/Target BMI synchronization is divided into these cases:
	 * CASE 1: length < 4
	 *         Should not happen
	 *
	 * CASE 2: 4 <= length <= 128
	 *         Wait for first 4 bytes to be in FIFO
	 *         If CONSERVATIVE_BMI_READ is enabled, also wait for
	 *         a BMI command credit, which indicates that the ENTIRE
	 *         response is available in the FIFO
	 *
	 * CASE 3: length > 128
	 *         Wait for the first 4 bytes to be in FIFO
	 *
	 * For most uses, a small timeout should be sufficient and we will
	 * usually see a response quickly; but there may be some unusual
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
	 * For now, we use an unbounded busy loop while waiting for
	 * BMI_EXECUTE.
	 *
	 * If BMI_EXECUTE ever needs to support longer-latency execution,
	 * especially in production, this code needs to be enhanced to sleep
	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
	 * a function of Host processor speed.
	 */
	if (len >= 4) { /* NB: Currently, always true */
		ret = ath6kl_bmi_get_rx_lkahd(ar);
		if (ret)
			return ret;
	}

	addr = ar->mbox_info.htc_addr;
	ret = hif_read_write_sync(ar, addr, buf, len,
				  HIF_RD_SYNC_BYTE_INC);
	if (ret) {
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

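/*
 * Tell the target that the BMI phase is complete and release the command
 * buffer. Subsequent calls are no-ops once BMI_DONE has been sent.
 */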
int ath6kl_bmi_done(struct ath6kl *ar)
{
	int ret;
	u32 cid = BMI_DONE;

	if (ar->bmi.done_sent) {
		ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;

	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send bmi done: %d\n", ret);
		return ret;
	}

	ath6kl_bmi_cleanup(ar);

	return 0;
}

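/*
 * Query the target for its version/type information. Newer targets reply
 * with a sentinel version followed by a byte count and the remainder of the
 * targ_info structure; the byte count must match the host's structure size.
 */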
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
			       struct ath6kl_bmi_target_info *targ_info)
{
	int ret;
	u32 cid = BMI_GET_TARGET_INFO;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send get target info: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
				  sizeof(targ_info->version));
	if (ret) {
		ath6kl_err("Unable to recv target info: %d\n", ret);
		return ret;
	}

	if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
		/* Determine how many bytes are in the Target's targ_info */
		ret = ath6kl_bmi_recv_buf(ar,
					  (u8 *)&targ_info->byte_count,
					  sizeof(targ_info->byte_count));
		if (ret) {
			ath6kl_err("unable to read target info byte count: %d\n",
				   ret);
			return ret;
		}

		/*
		 * If the target's idea of the targ_info size doesn't match
		 * the host's, backwards-compatibility handling would be
		 * needed; that isn't implemented, so bail out.
		 */
		if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
			WARN_ON(1);
			return -EINVAL;
		}

		/* Read the remainder of the targ_info */
		ret = ath6kl_bmi_recv_buf(ar,
					  ((u8 *)targ_info) +
					  sizeof(targ_info->byte_count),
					  sizeof(*targ_info) -
					  sizeof(targ_info->byte_count));

		if (ret) {
			ath6kl_err("Unable to read target info (%d bytes): %d\n",
				   targ_info->byte_count, ret);
			return ret;
		}
	}

	ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
		   targ_info->version, targ_info->type);

	return 0;
}

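/*
 * Read 'len' bytes of target memory starting at 'addr' into 'buf',
 * splitting the transfer into chunks of at most BMI_DATASZ_MAX bytes.
 */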
int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_READ_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, rx_len;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi read memory: device: addr: 0x%x, len: %d\n",
		   addr, len);

	len_remain = len;

	while (len_remain) {
		rx_len = (len_remain < BMI_DATASZ_MAX) ?
			 len_remain : BMI_DATASZ_MAX;
		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
		offset += sizeof(len);

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
		if (ret) {
			ath6kl_err("Unable to read from the device: %d\n",
				   ret);
			return ret;
		}
		memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
		len_remain -= rx_len; addr += rx_len;
	}

	return 0;
}

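/*
 * Write 'len' bytes from 'buf' to target memory at 'addr'. Transfers are
 * chunked so that the command header plus payload fit in BMI_DATASZ_MAX,
 * and the final chunk is padded up to a 4-byte boundary.
 */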
int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_WRITE_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
	u8 aligned_buf[BMI_DATASZ_MAX];
	u8 *src;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}

	memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi write memory: addr: 0x%x, len: %d\n", addr, len);

	len_remain = len;
	while (len_remain) {
		src = &buf[len - len_remain];

		if (len_remain < (BMI_DATASZ_MAX - header)) {
			if (len_remain & 3) {
				/* align it with 4 bytes */
				len_remain = len_remain +
					     (4 - (len_remain & 3));
				memcpy(aligned_buf, src, len_remain);
				src = aligned_buf;
			}
			tx_len = len_remain;
		} else {
			tx_len = (BMI_DATASZ_MAX - header);
		}

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
		offset += tx_len;

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		len_remain -= tx_len; addr += tx_len;
	}

	return 0;
}

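/*
 * Ask the target to execute code at 'addr' with '*param' as the argument;
 * the target's return value is written back into '*param'.
 */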
int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_EXECUTE;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(*param);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d\n",
		   addr, *param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
	offset += sizeof(*param);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}

	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}

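/*
 * Set the target's application start address to 'addr'.
 */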
int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_SET_APP_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

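/*
 * Read the 32-bit SOC register at 'addr' into '*param'.
 */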
int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_READ_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}
	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}

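/*
 * Write the 32-bit value 'param' to the SOC register at 'addr'.
 */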
int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
{
	u32 cid = BMI_WRITE_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(param);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi write SOC reg: addr: 0x%x, param: %d\n",
		   addr, param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
	offset += sizeof(param);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

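/*
 * Stream 'len' bytes of data to the target's currently open LZ stream, in
 * chunks sized so that the BMI_LZ_DATA header plus payload fit in
 * BMI_DATASZ_MAX.
 */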
int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
{
	u32 cid = BMI_LZ_DATA;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(len);
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = BMI_DATASZ_MAX + header;
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d\n",
		   len);

	len_remain = len;
	while (len_remain) {
		tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
			 len_remain : (BMI_DATASZ_MAX - header);

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
		       tx_len);
		offset += tx_len;

		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}

		len_remain -= tx_len;
	}

	return 0;
}

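/*
 * Open an LZ decompression stream on the target so that subsequent
 * BMI_LZ_DATA payloads are decompressed to 'addr'.
 */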
int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_LZ_STREAM_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > MAX_BMI_CMDBUF_SZ) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi LZ stream start: addr: 0x%x\n",
		   addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to start LZ stream to the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

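/*
 * Download a compressed image: start an LZ stream at 'addr', send the
 * 4-byte-aligned portion of 'buf', then a zero-padded final word for any
 * trailing bytes, and finally restart the stream to flush target caches.
 */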
int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	int ret;
	u32 last_word = 0;
	u32 last_word_offset = len & ~0x3;
	u32 unaligned_bytes = len & 0x3;

	ret = ath6kl_bmi_lz_stream_start(ar, addr);
	if (ret)
		return ret;

	if (unaligned_bytes) {
		/* copy the last word into a zero padded buffer */
		memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
	}

	ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
	if (ret)
		return ret;

	if (unaligned_bytes)
		ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);

	if (!ret) {
		/* Close compressed stream and open a new (fake) one.
		 * This serves mainly to flush Target caches. */
		ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
	}
	return ret;
}

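/*
 * Allocate (ath6kl_bmi_init) and free (ath6kl_bmi_cleanup) the shared BMI
 * command buffer used by all of the commands above.
 */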
int ath6kl_bmi_init(struct ath6kl *ar)
{
	ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);

	if (!ar->bmi.cmd_buf)
		return -ENOMEM;

	return 0;
}

void ath6kl_bmi_cleanup(struct ath6kl *ar)
{
	kfree(ar->bmi.cmd_buf);
	ar->bmi.cmd_buf = NULL;
}