]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/wireless/cw1200/bh.c
cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets
[mirror_ubuntu-artful-kernel.git] / drivers / net / wireless / cw1200 / bh.c
1 /*
2 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
3 *
4 * Copyright (c) 2010, ST-Ericsson
5 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
6 *
7 * Based on:
8 * ST-Ericsson UMAC CW1200 driver, which is
9 * Copyright (c) 2010, ST-Ericsson
10 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17 #include <linux/module.h>
18 #include <net/mac80211.h>
19 #include <linux/kthread.h>
20 #include <linux/timer.h>
21
22 #include "cw1200.h"
23 #include "bh.h"
24 #include "hwio.h"
25 #include "wsm.h"
26 #include "sbus.h"
27 #include "debug.h"
28 #include "fwio.h"
29
30 static int cw1200_bh(void *arg);
31
#define DOWNLOAD_BLOCK_SIZE_WR	(0x1000 - 4)
/* An SPI message cannot be bigger than (2^12 - 1) 16-bit words;
 * the "*2" converts that word count to bytes. */
#define MAX_SZ_RD_WR_BUFFERS	(DOWNLOAD_BLOCK_SIZE_WR*2)
/* Size of the control-register word the device piggybacks onto every read */
#define PIGGYBACK_CTRL_REG	(2)
/* Largest payload a single read/write transfer can carry */
#define EFFECTIVE_BUF_SIZE	(MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
/* Suspend state privates.
 * Handshake between cw1200_bh_suspend()/cw1200_bh_resume() and the BH
 * loop: the requester sets SUSPEND/RESUME, the BH loop acknowledges by
 * moving to SUSPENDED/RESUMED and waking bh_evt_wq. */
enum cw1200_bh_pm_state {
	CW1200_BH_RESUMED = 0,	/* BH running normally (initial state) */
	CW1200_BH_SUSPEND,	/* suspend requested, not yet acknowledged */
	CW1200_BH_SUSPENDED,	/* BH parked, waiting for a resume request */
	CW1200_BH_RESUME,	/* resume requested, not yet acknowledged */
};

/* Handler signature for incoming WSM messages (no uses visible in this
 * file; presumably consumed elsewhere in the driver). */
typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
	u8 *data, size_t size);
49
50 static void cw1200_bh_work(struct work_struct *work)
51 {
52 struct cw1200_common *priv =
53 container_of(work, struct cw1200_common, bh_work);
54 cw1200_bh(priv);
55 }
56
57 int cw1200_register_bh(struct cw1200_common *priv)
58 {
59 int err = 0;
60 /* Realtime workqueue */
61 priv->bh_workqueue = alloc_workqueue("cw1200_bh",
62 WQ_MEM_RECLAIM | WQ_HIGHPRI
63 | WQ_CPU_INTENSIVE, 1);
64
65 if (!priv->bh_workqueue)
66 return -ENOMEM;
67
68 INIT_WORK(&priv->bh_work, cw1200_bh_work);
69
70 pr_debug("[BH] register.\n");
71
72 atomic_set(&priv->bh_rx, 0);
73 atomic_set(&priv->bh_tx, 0);
74 atomic_set(&priv->bh_term, 0);
75 atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
76 priv->bh_error = 0;
77 priv->hw_bufs_used = 0;
78 priv->buf_id_tx = 0;
79 priv->buf_id_rx = 0;
80 init_waitqueue_head(&priv->bh_wq);
81 init_waitqueue_head(&priv->bh_evt_wq);
82
83 err = !queue_work(priv->bh_workqueue, &priv->bh_work);
84 WARN_ON(err);
85 return err;
86 }
87
88 void cw1200_unregister_bh(struct cw1200_common *priv)
89 {
90 atomic_add(1, &priv->bh_term);
91 wake_up(&priv->bh_wq);
92
93 flush_workqueue(priv->bh_workqueue);
94
95 destroy_workqueue(priv->bh_workqueue);
96 priv->bh_workqueue = NULL;
97
98 pr_debug("[BH] unregistered.\n");
99 }
100
101 void cw1200_irq_handler(struct cw1200_common *priv)
102 {
103 pr_debug("[BH] irq.\n");
104
105 /* Disable Interrupts! */
106 /* NOTE: sbus_ops->lock already held */
107 __cw1200_irq_enable(priv, 0);
108
109 if (/* WARN_ON */(priv->bh_error))
110 return;
111
112 if (atomic_add_return(1, &priv->bh_rx) == 1)
113 wake_up(&priv->bh_wq);
114 }
115 EXPORT_SYMBOL_GPL(cw1200_irq_handler);
116
117 void cw1200_bh_wakeup(struct cw1200_common *priv)
118 {
119 pr_debug("[BH] wakeup.\n");
120 if (priv->bh_error) {
121 pr_err("[BH] wakeup failed (BH error)\n");
122 return;
123 }
124
125 if (atomic_add_return(1, &priv->bh_tx) == 1)
126 wake_up(&priv->bh_wq);
127 }
128
129 int cw1200_bh_suspend(struct cw1200_common *priv)
130 {
131 pr_debug("[BH] suspend.\n");
132 if (priv->bh_error) {
133 wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
134 return -EINVAL;
135 }
136
137 atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
138 wake_up(&priv->bh_wq);
139 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
140 (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
141 1 * HZ) ? 0 : -ETIMEDOUT;
142 }
143
144 int cw1200_bh_resume(struct cw1200_common *priv)
145 {
146 pr_debug("[BH] resume.\n");
147 if (priv->bh_error) {
148 wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
149 return -EINVAL;
150 }
151
152 atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
153 wake_up(&priv->bh_wq);
154 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
155 (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
156 1 * HZ) ? 0 : -ETIMEDOUT;
157 }
158
159 static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
160 {
161 ++priv->hw_bufs_used;
162 }
163
164 int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
165 {
166 int ret = 0;
167 int hw_bufs_used = priv->hw_bufs_used;
168
169 priv->hw_bufs_used -= count;
170 if (WARN_ON(priv->hw_bufs_used < 0))
171 ret = -1;
172 else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
173 ret = 1;
174 if (!priv->hw_bufs_used)
175 wake_up(&priv->bh_evt_wq);
176 return ret;
177 }
178
179 static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
180 u16 *ctrl_reg)
181 {
182 int ret;
183
184 ret = cw1200_reg_read_16(priv,
185 ST90TDS_CONTROL_REG_ID, ctrl_reg);
186 if (ret) {
187 ret = cw1200_reg_read_16(priv,
188 ST90TDS_CONTROL_REG_ID, ctrl_reg);
189 if (ret)
190 pr_err("[BH] Failed to read control register.\n");
191 }
192
193 return ret;
194 }
195
196 static int cw1200_device_wakeup(struct cw1200_common *priv)
197 {
198 u16 ctrl_reg;
199 int ret;
200
201 pr_debug("[BH] Device wakeup.\n");
202
203 /* First, set the dpll register */
204 ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
205 cw1200_dpll_from_clk(priv->hw_refclk));
206 if (WARN_ON(ret))
207 return ret;
208
209 /* To force the device to be always-on, the host sets WLAN_UP to 1 */
210 ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
211 ST90TDS_CONT_WUP_BIT);
212 if (WARN_ON(ret))
213 return ret;
214
215 ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
216 if (WARN_ON(ret))
217 return ret;
218
219 /* If the device returns WLAN_RDY as 1, the device is active and will
220 * remain active. */
221 if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
222 pr_debug("[BH] Device awake.\n");
223 return 1;
224 }
225
226 return 0;
227 }
228
229 /* Must be called from BH thraed. */
230 void cw1200_enable_powersave(struct cw1200_common *priv,
231 bool enable)
232 {
233 pr_debug("[BH] Powerave is %s.\n",
234 enable ? "enabled" : "disabled");
235 priv->powersave_enabled = enable;
236 }
237
/* Receive and dispatch one WSM message from the device.
 *
 * @ctrl_reg: in/out. On entry its NEXT_LEN field gives the length (in
 *            16-bit words) of the pending message; on return it is
 *            refreshed from the control word the device piggybacks onto
 *            every read, so the caller can poll for more data without an
 *            extra register read.
 * @tx:       set to 1 when a TX confirmation freed a device buffer,
 *            i.e. more TX is now possible.
 *
 * Returns 0 on success (including "nothing to read"), -1 on any error.
 * The skb is freed here unless wsm_handle_rx() takes ownership of it.
 */
static int cw1200_bh_rx_helper(struct cw1200_common *priv,
			       uint16_t *ctrl_reg,
			       int *tx)
{
	size_t read_len = 0;
	struct sk_buff *skb_rx = NULL;
	struct wsm_hdr *wsm;
	size_t wsm_len;
	u16 wsm_id;
	u8 wsm_seq;
	/* NOTE(review): rx_resync is a local initialized to 1 and only set
	 * to 0 at the bottom of this function, so the !rx_resync sequence
	 * check below is dead within a single call.  It looks like this
	 * flag was meant to persist across calls — verify intent. */
	int rx_resync = 1;

	size_t alloc_len;
	u8 *data;

	/* NEXT_LEN is in 16-bit words; convert to bytes. */
	read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
	if (!read_len)
		return 0; /* No more work */

	if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
		    (read_len > EFFECTIVE_BUF_SIZE))) {
		pr_debug("Invalid read len: %zu (%04x)",
			 read_len, *ctrl_reg);
		goto err;
	}

	/* Add SIZE of PIGGYBACK reg (CONTROL Reg)
	 * to the NEXT Message length + 2 Bytes for SKB */
	read_len = read_len + 2;

	/* Round up to whatever alignment the underlying bus requires. */
	alloc_len = priv->sbus_ops->align_size(
		priv->sbus_priv, read_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
		pr_debug("Read aligned len: %zu\n",
			 alloc_len);
	}

	skb_rx = dev_alloc_skb(alloc_len);
	if (WARN_ON(!skb_rx))
		goto err;

	skb_trim(skb_rx, 0);
	skb_put(skb_rx, read_len);
	data = skb_rx->data;
	if (WARN_ON(!data))
		goto err;

	if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
		pr_err("rx blew up, len %zu\n", alloc_len);
		goto err;
	}

	/* Piggyback: the last 16-bit word of every read is a fresh copy of
	 * the control register — report it back to the caller. */
	*ctrl_reg = __le16_to_cpu(
		((__le16 *)data)[alloc_len / 2 - 1]);

	wsm = (struct wsm_hdr *)data;
	wsm_len = __le16_to_cpu(wsm->len);
	/* The header's own length must fit within what we just read. */
	if (WARN_ON(wsm_len > read_len))
		goto err;

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("<-- ",
				     DUMP_PREFIX_NONE,
				     data, wsm_len);

	/* id field: low 12 bits = message id, bits 13-15 = sequence number. */
	wsm_id = __le16_to_cpu(wsm->id) & 0xFFF;
	wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

	skb_trim(skb_rx, wsm_len);

	if (wsm_id == 0x0800) {
		/* 0x0800 is treated here as a firmware exception
		 * indication; hand the payload to the exception handler
		 * and bail out. */
		wsm_handle_exception(priv,
				     &data[sizeof(*wsm)],
				     wsm_len - sizeof(*wsm));
		goto err;
	} else if (!rx_resync) {
		if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
			goto err;
	}
	priv->wsm_rx_seq = (wsm_seq + 1) & 7;
	rx_resync = 0;

	/* Bit 0x0400 marks a confirmation: one device TX buffer is free. */
	if (wsm_id & 0x0400) {
		int rc = wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(rc < 0))
			return rc;
		else if (rc > 0)
			*tx = 1;
	}

	/* cw1200_wsm_rx takes care on SKB livetime: if it consumes the skb
	 * it NULLs our pointer via &skb_rx. */
	if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
		goto err;

	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}

	return 0;

err:
	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}
	return -1;
}
349
/* Fetch one frame from the WSM TX queues and write it to the device.
 *
 * @pending_tx: set to 1 when TX must be retried later (device still
 *              waking up, or wakeup failed).
 * @tx_burst:   in/out, remaining burst budget as reported by wsm_get_tx().
 *
 * Returns 1 if more frames may follow in this burst, 0 if there is
 * nothing (more) to send right now, negative on a fatal bus error.
 */
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
			       int *pending_tx,
			       int *tx_burst)
{
	size_t tx_len;
	u8 *data;
	int ret;
	struct wsm_hdr *wsm;

	/* The device may be asleep; wake it before touching the bus. */
	if (priv->device_can_sleep) {
		ret = cw1200_device_wakeup(priv);
		if (WARN_ON(ret < 0)) { /* Error in wakeup */
			*pending_tx = 1;
			return 0;
		} else if (ret) { /* Woke up */
			priv->device_can_sleep = false;
		} else { /* Did not awake */
			*pending_tx = 1;
			return 0;
		}
	}

	/* Reserve a device buffer up front; released again on any
	 * failure or if there is nothing to send. */
	wsm_alloc_tx_buffer(priv);
	ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
	if (ret <= 0) {
		wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(ret < 0))
			return ret; /* Error */
		return 0; /* No work */
	}

	wsm = (struct wsm_hdr *)data;
	BUG_ON(tx_len < sizeof(*wsm));
	BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

	/* Request another BH pass: there may be more queued TX. */
	atomic_add(1, &priv->bh_tx);

	tx_len = priv->sbus_ops->align_size(
		priv->sbus_priv, tx_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
		pr_debug("Write aligned len: %zu\n", tx_len);

	/* Stamp the current TX sequence number into the WSM header. */
	wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
	wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

	if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
		pr_err("tx blew up, len %zu\n", tx_len);
		wsm_release_tx_buffer(priv, 1);
		return -1; /* Error */
	}

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("--> ",
				     DUMP_PREFIX_NONE,
				     data,
				     __le16_to_cpu(wsm->len));

	/* Tell the WSM layer the frame left the host; advance the seq. */
	wsm_txed(priv, data);
	priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

	if (*tx_burst > 1) {
		cw1200_debug_tx_burst(priv);
		return 1; /* Work remains */
	}

	return 0;
}
419
/* Bottom-half state machine: the single context that owns all bus I/O.
 *
 * Queued once by cw1200_register_bh() and loops until a termination
 * request (bh_term), a fatal error, or a stuck-TX timeout.  Each
 * iteration: pick a wait timeout, sleep on bh_wq for RX/TX/term/suspend
 * events, drain RX (cw1200_bh_rx_helper) and pump TX
 * (cw1200_bh_tx_helper), then re-enable the device interrupt.
 *
 * On exit, device interrupts are masked and, unless termination was
 * requested, priv->bh_error is set so the rest of the driver backs off.
 * Always returns 0.
 */
static int cw1200_bh(void *arg)
{
	struct cw1200_common *priv = arg;
	int rx, tx, term, suspend;
	u16 ctrl_reg = 0;
	int tx_allowed;
	int pending_tx = 0;
	int tx_burst;
	long status;
	u32 dummy;
	int ret;

	for (;;) {
		/* Idle and powersave allowed: clear WLAN_UP so the device
		 * may sleep, but wake in 1s to re-check. */
		if (!priv->hw_bufs_used &&
		    priv->powersave_enabled &&
		    !priv->device_can_sleep &&
		    !atomic_read(&priv->recent_scan)) {
			status = 1 * HZ;
			pr_debug("[BH] Device wakedown. No data.\n");
			cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
			priv->device_can_sleep = true;
		} else if (priv->hw_bufs_used) {
			/* Interrupt loss detection */
			status = 1 * HZ;
		} else {
			status = MAX_SCHEDULE_TIMEOUT;
		}

		/* Dummy Read for SDIO retry mechanism*/
		if ((priv->hw_type != -1) &&
		    (atomic_read(&priv->bh_rx) == 0) &&
		    (atomic_read(&priv->bh_tx) == 0))
			cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
					&dummy, sizeof(dummy));

		pr_debug("[BH] waiting ...\n");
		/* Atomically collect-and-clear all wakeup reasons. A TX
		 * deferred from a previous pass (pending_tx) masks suspend
		 * so the frame is not stranded across a suspend cycle. */
		status = wait_event_interruptible_timeout(priv->bh_wq, ({
			rx = atomic_xchg(&priv->bh_rx, 0);
			tx = atomic_xchg(&priv->bh_tx, 0);
			term = atomic_xchg(&priv->bh_term, 0);
			suspend = pending_tx ?
				0 : atomic_read(&priv->bh_suspend);
			(rx || tx || term || suspend || priv->bh_error);
		}), status);

		pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n",
			 rx, tx, term, suspend, status);

		/* Did an error occur? */
		if ((status < 0 && status != -ERESTARTSYS) ||
		    term || priv->bh_error) {
			break;
		}
		if (!status) {  /* wait_event timed out */
			unsigned long timestamp = jiffies;
			long timeout;
			int pending = 0;
			int i;

			/* Check to see if we have any outstanding frames */
			if (priv->hw_bufs_used && (!rx || !tx)) {
				wiphy_warn(priv->hw->wiphy,
					   "Missed interrupt? (%d frames outstanding)\n",
					   priv->hw_bufs_used);
				/* Force an RX pass to pick up any confirm
				 * whose interrupt we may have lost. */
				rx = 1;

				/* Get a timestamp of "oldest" frame */
				for (i = 0; i < 4; ++i)
					pending += cw1200_queue_get_xmit_timestamp(
						&priv->tx_queue[i],
						&timestamp,
						priv->pending_frame_id);

				/* Check if frame transmission is timed out.
				 * Add an extra second with respect to possible
				 * interrupt loss.
				 */
				timeout = timestamp +
					WSM_CMD_LAST_CHANCE_TIMEOUT +
					1 * HZ -
					jiffies;

				/* And terminate BH thread if the frame is "stuck" */
				if (pending && timeout < 0) {
					wiphy_warn(priv->hw->wiphy,
						   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
						   priv->hw_bufs_used, pending,
						   timestamp, jiffies);
					break;
				}
			} else if (!priv->device_can_sleep &&
				   !atomic_read(&priv->recent_scan)) {
				pr_debug("[BH] Device wakedown. Timeout.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}
			goto done;
		} else if (suspend) {
			/* Host suspend handshake: acknowledge, then park
			 * here until cw1200_bh_resume() sets RESUME. */
			pr_debug("[BH] Device suspend.\n");
			if (priv->powersave_enabled) {
				pr_debug("[BH] Device wakedown. Suspend.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}

			atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
			wake_up(&priv->bh_evt_wq);
			status = wait_event_interruptible(priv->bh_wq,
							  CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
			if (status < 0) {
				wiphy_err(priv->hw->wiphy,
					  "Failed to wait for resume: %ld.\n",
					  status);
				break;
			}
			pr_debug("[BH] Device resume.\n");
			atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
			wake_up(&priv->bh_evt_wq);
			/* Force an RX pass after resume. */
			atomic_add(1, &priv->bh_rx);
			goto done;
		}

	rx:
		/* Fold any TX deferred by a full device into this pass. */
		tx += pending_tx;
		pending_tx = 0;

		if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
			break;

		/* Don't bother trying to rx unless we have data to read */
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
			ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
			if (ret < 0)
				break;
			/* Double up here if there's more data.. */
			if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
				ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
				if (ret < 0)
					break;
			}
		}

	tx:
		if (tx) {
			tx = 0;

			BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
			tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
			tx_allowed = tx_burst > 0;

			if (!tx_allowed) {
				/* Buffers full. Ensure we process tx
				 * after we handle rx..
				 * NOTE(review): tx was zeroed just above,
				 * so this stores 0; the deferred TX relies
				 * on the next TX-confirm wakeup instead.
				 * Looks like "pending_tx = 1" was intended
				 * — verify. */
				pending_tx = tx;
				goto done_rx;
			}
			ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
			if (ret < 0)
				break;
			if (ret > 0) /* More to transmit */
				tx = ret;

			/* Re-read ctrl reg */
			if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
				break;
		}

	done_rx:
		if (priv->bh_error)
			break;
		/* Alternate RX and TX passes until both are idle. */
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
			goto rx;
		if (tx)
			goto tx;

	done:
		/* Re-enable device interrupts */
		priv->sbus_ops->lock(priv->sbus_priv);
		__cw1200_irq_enable(priv, 1);
		priv->sbus_ops->unlock(priv->sbus_priv);
	}

	/* Explicitly disable device interrupts */
	priv->sbus_ops->lock(priv->sbus_priv);
	__cw1200_irq_enable(priv, 0);
	priv->sbus_ops->unlock(priv->sbus_priv);

	if (!term) {
		pr_err("[BH] Fatal error, exiting.\n");
		priv->bh_error = 1;
		/* TODO: schedule_work(recovery) */
	}
	return 0;
}