/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/platform_data/brcmfmac-sdio.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

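/*
 * Out-of-band interrupt handler. The OOB line is level triggered, so the
 * host irq is masked here and re-enabled later by the bus code once the
 * interrupt source in the dongle has been serviced.
 */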
static irqreturn_t brcmf_sdio_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdbrcm_isr(sdiodev->bus);

	return IRQ_HANDLED;
}

static void brcmf_sdio_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdbrcm_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
{
}

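/*
 * brcmf_sdio_intr_register() - hook up dongle interrupt delivery.
 *
 * When the platform data advertises an out-of-band interrupt line, a host
 * irq is requested and the card is configured (via SDIO_CCCR_BRCM_SEPINT)
 * to route its interrupt to that pin. Otherwise the regular in-band SDIO
 * interrupt is claimed for function 1, with a dummy handler claimed for
 * function 2 as well.
 */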
int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	u8 data;
	unsigned long flags;

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  sdiodev->pdata->oob_irq_nr);
		ret = request_irq(sdiodev->pdata->oob_irq_nr,
				  brcmf_sdio_oob_irqhandler,
				  sdiodev->pdata->oob_irq_flags,
				  "brcmf_oob_intr",
				  &sdiodev->func[1]->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;
		spin_lock_init(&sdiodev->irq_en_lock);
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		sdiodev->irq_en = true;
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);

		ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func[1]);

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
		if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_SEPINT_ACT_HI;
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);

		sdio_release_host(sdiodev->func[1]);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func[1]);
		sdio_claim_irq(sdiodev->func[1], brcmf_sdio_ib_irqhandler);
		sdio_claim_irq(sdiodev->func[2], brcmf_sdio_dummy_irqhandler);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}

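/* Undo brcmf_sdio_intr_register(): quiesce the card-side interrupt
 * configuration, then release either the OOB host irq or the in-band
 * SDIO irqs.
 */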
int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering\n");

	if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
		sdio_claim_host(sdiodev->func[1]);
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func[1]);

		if (sdiodev->oob_irq_requested) {
			sdiodev->oob_irq_requested = false;
			if (sdiodev->irq_wake) {
				disable_irq_wake(sdiodev->pdata->oob_irq_nr);
				sdiodev->irq_wake = false;
			}
			free_irq(sdiodev->pdata->oob_irq_nr,
				 &sdiodev->func[1]->dev);
			sdiodev->irq_en = false;
		}
	} else {
		sdio_claim_host(sdiodev->func[1]);
		sdio_release_irq(sdiodev->func[2]);
		sdio_release_irq(sdiodev->func[1]);
		sdio_release_host(sdiodev->func[1]);
	}

	return 0;
}

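/* Program the three SBADDR window registers so that function 1 accesses hit
 * the backplane region containing @address. Each byte write is retried up to
 * SDIOH_API_ACCESS_RETRY_LIMIT times.
 */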
static int
brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
{
	int err = 0, i;
	u8 addr[3];
	s32 retry;

	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;

	for (i = 0; i < 3; i++) {
		retry = 0;
		do {
			if (retry)
				usleep_range(1000, 2000);
			err = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE,
					SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW + i,
					&addr[i]);
		} while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

		if (err) {
			brcmf_err("failed at addr:0x%0x\n",
				  SBSDIO_FUNC1_SBADDRLOW + i);
			break;
		}
	}

	return err;
}

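/* Translate a full backplane address into a function 1 window offset, moving
 * the SBADDR window if the target falls outside the currently selected
 * region, and flag the access as 4-byte when @width is 4.
 */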
static int
brcmf_sdio_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
{
	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
	int err = 0;

	if (bar0 != sdiodev->sbwad) {
		err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
		if (err)
			return err;

		sdiodev->sbwad = bar0;
	}

	*addr &= SBSDIO_SB_OFT_ADDR_MASK;

	if (width == 4)
		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	return 0;
}

int
brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
			void *data, bool write)
{
	u8 func_num, reg_size;
	s32 retry = 0;
	int ret;

	/*
	 * figure out how to read the register based on address range
	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
	 * The rest: function 1 silicon backplane core registers
	 */
	if ((addr & ~REG_F0_REG_MASK) == 0) {
		func_num = SDIO_FUNC_0;
		reg_size = 1;
	} else if ((addr & ~REG_F1_MISC_MASK) == 0) {
		func_num = SDIO_FUNC_1;
		reg_size = 1;
	} else {
		func_num = SDIO_FUNC_1;
		reg_size = 4;

		ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
		if (ret)
			goto done;
	}

	do {
		if (!write)
			memset(data, 0, reg_size);
		if (retry)	/* wait 1 ms for the bus to settle down */
			usleep_range(1000, 2000);
		if (reg_size == 1)
			ret = brcmf_sdioh_request_byte(sdiodev, write,
						       func_num, addr, data);
		else
			ret = brcmf_sdioh_request_word(sdiodev, write,
						       func_num, addr, data, 4);
	} while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);

done:
	if (ret != 0)
		brcmf_err("failed with %d\n", ret);

	return ret;
}

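/* Thin wrappers around brcmf_sdio_regrw_helper() for byte and 32-bit register
 * access; the optional @ret argument, when non-NULL, receives the error code.
 */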
u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u8 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
	brcmf_dbg(SDIO, "data:0x%02x\n", data);

	if (ret)
		*ret = retval;

	return data;
}

u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data;
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
	brcmf_dbg(SDIO, "data:0x%08x\n", data);

	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
		      u8 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);

	if (ret)
		*ret = retval;
}

void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
		      u32 data, int *ret)
{
	int retval;

	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
	retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);

	if (ret)
		*ret = retval;
}

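/* Transfer a single skb to or from the dongle using the standard sdio_io
 * accessors. The request length is rounded up to a 4-byte boundary, so the
 * skb is assumed to have room for the padded length.
 */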
static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
			     bool write, u32 addr, struct sk_buff *pkt)
{
	unsigned int req_sz;

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	/* a single skb uses the standard mmc interface */
	req_sz = pkt->len + 3;
	req_sz &= (uint)~3;

	if (write)
		return sdio_memcpy_toio(sdiodev->func[fn], addr,
					((u8 *)(pkt->data)), req_sz);
	else if (fn == 1)
		return sdio_memcpy_fromio(sdiodev->func[fn],
					  ((u8 *)(pkt->data)), addr, req_sz);
	else
		/* a function 2 read is a FIFO operation */
		return sdio_readsb(sdiodev->func[fn],
				   ((u8 *)(pkt->data)), addr, req_sz);
}

/**
 * brcmf_sdio_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @fn: SDIO function number
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer list pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
				bool write, u32 addr,
				struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct sg_table st;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
	if (brcmf_pm_resume_error(sdiodev))
		return -EIO;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = sdiodev->func[fn]->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto exit;
	}

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = st.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (fn & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1<<27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = st.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);

			sg_cnt++;
			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (fn == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
		mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	sg_free_table(&st);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}

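/*
 * brcmf_sdcard_recv_* - receive data from the dongle at @addr on function
 * @fn. The _buf variant copies into a flat buffer via a temporary skb, the
 * _pkt variant receives a single skb, and the _chain variant hands a whole
 * skb list to the scatter-gather path.
 */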
int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int
brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, struct sk_buff *pkt)
{
	uint width;
	int err = 0;

	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
		  fn, addr, pkt->len);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
	if (err)
		goto done;

	err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);

done:
	return err;
}

int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
			    uint flags, struct sk_buff_head *pktq)
{
	uint width;
	int err = 0;

	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
		  fn, addr, pktq->qlen);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
	if (err)
		goto done;

	err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);

done:
	return err;
}

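/*
 * brcmf_sdcard_send_* - transmit data to the dongle at @addr on function
 * @fn. The _buf variant wraps a flat buffer in a temporary skb; the _pkt
 * variant takes an skb list and uses the scatter-gather path when the host
 * supports it, falling back to per-packet transfers otherwise.
 */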
int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	uint width;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);

	if (!err)
		err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
		      uint flags, struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	uint width;
	int err;

	brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
		  fn, addr, pktq->qlen);

	width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
	err = brcmf_sdio_addrprep(sdiodev, width, &addr);
	if (err)
		return err;

	if (pktq->qlen == 1 || !sdiodev->sg_support)
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
			if (err)
				break;
		}
	else
		err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);

	return err;
}

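/* Read or write dongle RAM. The transfer is split into chunks that fit the
 * backplane offset window; the SBADDR window is moved for each chunk and
 * restored to the saved base (sbwad) when done.
 */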
int
brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		 u8 *data, uint size)
{
	int bcmerror = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func[1]);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		bcmerror = brcmf_sdcard_set_sbaddr_window(sdiodev, address);
		if (bcmerror)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);
		if (write)
			memcpy(pkt->data, data, dsize);
		bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
					     sdaddr, pkt);
		if (bcmerror) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, dsize);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	/* Return the window to backplane enumeration space for core access */
	if (brcmf_sdcard_set_sbaddr_window(sdiodev, sdiodev->sbwad))
		brcmf_err("FAILED to set window back to 0x%x\n",
			  sdiodev->sbwad);

	sdio_release_host(sdiodev->func[1]);

	return bcmerror;
}

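/* Abort an ongoing CMD53 transfer on function @fn by writing the function
 * number to the CCCR abort register through function 0.
 */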
int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
{
	char t_func = (char)fn;

	brcmf_dbg(SDIO, "Enter\n");

	/* issue abort cmd52 command through F0 */
	brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
				 SDIO_CCCR_ABORT, &t_func);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}

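/* Attach the host controller glue and then the common SDIO bus layer; on
 * failure everything that was set up is torn down via brcmf_sdio_remove().
 */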
int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
	u32 regs = 0;
	int ret = 0;

	ret = brcmf_sdioh_attach(sdiodev);
	if (ret)
		goto out;

	regs = SI_ENUM_BASE;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
	if (!sdiodev->bus) {
		brcmf_err("device attach failed\n");
		ret = -ENODEV;
		goto out;
	}

out:
	if (ret)
		brcmf_sdio_remove(sdiodev);

	return ret;
}
EXPORT_SYMBOL(brcmf_sdio_probe);

int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->bus_if->state = BRCMF_BUS_DOWN;

	if (sdiodev->bus) {
		brcmf_sdbrcm_disconnect(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdioh_detach(sdiodev);

	sdiodev->sbwad = 0;

	return 0;
}
EXPORT_SYMBOL(brcmf_sdio_remove);

void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
{
	if (enable)
		brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
	else
		brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
}