/* drivers/net/wireless/iwlwifi/iwl-agn-lib.c */

/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"

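/*
 * iwlagn_get_scd_ssn - fetch the scheduler SSN from a Tx response
 *
 * The uCode appends the scheduler's sequence number after the per-frame
 * status entries, i.e. frame_count entries past the start of the status
 * array.
 */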
static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}

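/*
 * iwlagn_tx_status_reply_tx - update aggregation state from a Tx response
 *
 * Records the aggregation's rate and frame count, and builds the bitmap of
 * frames still awaiting a block-ack within the Tx window.  Returns 0 on
 * success, -1 if a frame index cannot be matched to a valid skb.
 */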
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
				     struct iwl_ht_agg *agg,
				     struct iwl5000_tx_resp *tx_resp,
				     int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		/* FIXME: code repetition */
		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= iwl_tx_status_to_mac80211(status);
		iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
		/* FIXME: code repetition end */

		IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
				   status & 0xff, tx_resp->failure_frame);
		IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n",
				   rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
			if (!hdr) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't point to valid skb"
					" idx=%d, txq_id=%d\n", idx, txq_id);
				return -1;
			}

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					idx, SEQ_TO_SN(sc),
					hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}

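/*
 * iwlagn_rx_reply_tx - handle a REPLY_TX notification from the uCode
 *
 * Reclaims completed entries from the Tx queue and fills in the mac80211
 * Tx status.  For scheduler-retry (aggregation) queues it defers to
 * iwlagn_tx_status_reply_tx() and uses the scheduler SSN to decide how
 * far the queue may be reclaimed.
 */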
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le16_to_cpu(tx_resp->status.status);
	int tid;
	int sta_id;
	int freed;

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
			"is out of range [0-%d] %d %d\n", txq_id,
			index, txq->q.n_bd, txq->q.write_ptr,
			txq->q.read_ptr);
		return;
	}

	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
	memset(&info->status, 0, sizeof(info->status));

	tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;

	if (txq->sched_retry) {
		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg = NULL;

		agg = &priv->stations[sta_id].tid[tid].agg;

		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
					   "scd_ssn=%d idx=%d txq=%d swq=%d\n",
					   scd_ssn, index, txq_id, txq->swq_id);

			freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
			    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
				if (agg->state == IWL_AGG_OFF)
					iwl_wake_queue(priv, txq_id);
				else
					iwl_wake_queue(priv, txq->swq_id);
			}
		}
	} else {
		BUG_ON(txq_id != txq->swq_id);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= iwl_tx_status_to_mac80211(status);
		iwl_hwrate_to_tx_control(priv,
					 le32_to_cpu(tx_resp->rate_n_flags),
					 info);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
				   "0x%x retries %d\n",
				   txq_id,
				   iwl_get_tx_fail_reason(status), status,
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame);

		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
			iwl_wake_queue(priv, txq_id);
	}

	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);

	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
}

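/*
 * iwlagn_rx_handler_setup - install the agn-specific Rx handlers
 *
 * Registers the calibration-result, calibration-complete and REPLY_TX
 * handlers in the driver's Rx dispatch table.
 */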
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
{
	/* init calibration handlers */
	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
					iwlagn_rx_calib_result;
	priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
					iwlagn_rx_calib_complete;
	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
}

void iwlagn_setup_deferred_work(struct iwl_priv *priv)
{
	/* in agn, the tx power calibration is done in uCode */
	priv->disable_tx_power_cal = 1;
}

int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
		(addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}

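/*
 * iwlagn_send_tx_power - send the current Tx power limit to the uCode
 *
 * The command takes the limit in half-dBm units, capped by the enhanced
 * EEPROM limit when that is lower than the user's setting.
 */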
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	/* the command takes half-dBm units, so double the dBm user limit */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * Newer devices with an enhanced/extended tx power table in
		 * EEPROM store the limit in half-dBm, while the driver
		 * reports dBm to mac80211.  Rounding up during that
		 * conversion can push the reported limit 1/2 dBm above the
		 * regulatory maximum, so if "tx_power_user_lmt" exceeds the
		 * EEPROM value (in half-dBm), lower the tx power to the
		 * EEPROM limit here.
		 */
		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
	}
	tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;

	if (IWL_UCODE_API(priv->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
				      sizeof(tx_power_cmd), &tx_power_cmd,
				      NULL);
}

void iwlagn_temperature(struct iwl_priv *priv)
{
	/* store temperature from statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
	iwl_tt_handler(priv);
}

u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		u16 voltage;
	} *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_5000_CALIB_ALL);
	return hdr->version;
}

/*
 * EEPROM
 */
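/*
 * EEPROM addresses with the INDIRECT_ADDRESS bit set are relative to one
 * of several linked sections: the INDIRECT_TYPE_MSK bits select which
 * link word holds the section's base, and that base (stored in words) is
 * converted to bytes and added to the low ADDRESS_MSK bits.
 */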
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
		break;
	default:
		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
			address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to bytes */
	return (address & ADDRESS_MSK) + (offset << 1);
}

const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
				   size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->eeprom_size);
	return &priv->eeprom[address];
}

struct iwl_mod_params iwlagn_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};

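/*
 * iwlagn_rx_queue_reset - return all Rx buffers to the rx_used list
 *
 * Unmaps and frees any pages still attached to pool entries, then resets
 * the read/write indices so the queue looks fully consumed until it is
 * restocked.
 */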
void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may still hold a
		 * DMA-mapped page, so unmap and free that storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << priv->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}

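/*
 * iwlagn_rx_init - program the flow handler's Rx DMA channel
 *
 * Tells the device where the RBD circular buffer and the Rx status
 * write-back live in DRAM, then enables Rx DMA with the configured
 * buffer size, RB timeout and RBD count.
 */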
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (!priv->cfg->use_isr_legacy)
		rb_timeout = RX_RB_TIMEOUT;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->dma_addr >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}

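/*
 * iwlagn_hw_nic_init - bring the NIC to an initialized state
 *
 * Runs the APM init/config ops, allocates or resets the Rx queue,
 * replenishes it, and (re)builds the Tx and command queues before
 * setting STATUS_INIT.
 */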
int iwlagn_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwlagn_rx_queue_reset(priv, rxq);

	iwlagn_rx_replenish(priv);

	iwlagn_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	ret = iwlagn_txq_ctx_reset(priv);
	if (ret)
		return ret;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
					     dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
							      rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}

/**
 * iwlagn_rx_allocate - move all used buffers from rx_used to rx_free
 *
 * A fresh page is allocated and DMA-mapped for each buffer as it is
 * moved to rx_free.
 *
 * The caller is expected to restock the Rx queue afterwards via
 * iwlagn_rx_queue_restock.  This runs as a scheduled work item (except
 * during initialization and atomic replenish).
 */
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
					     PAGE_SIZE << priv->hw_params.rx_page_order,
					     PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

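/*
 * iwlagn_rx_replenish may sleep (GFP_KERNEL) and takes priv->lock around
 * the restock; iwlagn_rx_replenish_now is the lock-free GFP_ATOMIC
 * variant for contexts that must not sleep.
 */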
void iwlagn_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwlagn_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwlagn_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
	iwlagn_rx_allocate(priv, GFP_ATOMIC);

	iwlagn_rx_queue_restock(priv);
}

/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry must have its page set to
 * NULL.  This free routine walks the list of pool entries and, wherever
 * the page is non-NULL, unmaps and frees it.
 */
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << priv->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->dma_addr);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}

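/*
 * iwlagn_rxq_stop - halt the Rx DMA channel
 *
 * Disables channel 0 and polls up to 1000 usecs for the flow handler to
 * report the channel idle.
 */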
int iwlagn_rxq_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}