/*
	Copyright (C) 2004 - 2007 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * Beacon handlers.
 */
int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
			    struct ieee80211_tx_control *control)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct skb_desc *desc;
	struct data_ring *ring;
	struct data_entry *entry;

	/*
	 * Just in case mac80211 doesn't set this correctly,
	 * but we need this queue set for the descriptor
	 * initialization.
	 */
	control->queue = IEEE80211_TX_QUEUE_BEACON;
	ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
	entry = rt2x00_get_data_entry(ring);

	/*
	 * Fill in skb descriptor
	 */
	desc = get_skb_desc(skb);
	desc->desc_len = ring->desc_size;
	desc->data_len = skb->len;
	desc->desc = entry->priv;
	desc->data = skb->data;
	desc->ring = ring;
	desc->entry = entry;

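	/*
	 * Copy the beacon into the DMA-addressable buffer of this entry
	 * and let rt2x00lib build the hardware TX descriptor.
	 */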
	memcpy(entry->data_addr, skb->data, skb->len);
	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);

	/*
	 * Enable beacon generation.
	 */
	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update);

/*
 * TX data handlers.
 */
int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
			    struct data_ring *ring, struct sk_buff *skb,
			    struct ieee80211_tx_control *control)
{
	struct data_entry *entry = rt2x00_get_data_entry(ring);
	__le32 *txd = entry->priv;
	struct skb_desc *desc;
	u32 word;

	if (rt2x00_ring_full(ring))
		return -EINVAL;

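	/*
	 * The entry at the current index must no longer be owned by the
	 * NIC or be marked valid from a previous transmission before we
	 * may reuse it.
	 */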
	rt2x00_desc_read(txd, 0, &word);

	if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
	    rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
		ERROR(rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      control->queue, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Fill in skb descriptor
	 */
	desc = get_skb_desc(skb);
	desc->desc_len = ring->desc_size;
	desc->data_len = skb->len;
	desc->desc = entry->priv;
	desc->data = skb->data;
	desc->ring = ring;
	desc->entry = entry;

	memcpy(entry->data_addr, skb->data, skb->len);
	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);

	rt2x00_ring_index_inc(ring);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 */
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_ring *ring = rt2x00dev->rx;
	struct data_entry *entry;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct skb_desc *skbdesc;
	struct rxdata_entry_desc desc;
	int header_size;
	__le32 *rxd;
	int align;
	u32 word;

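	/*
	 * Process entries until we reach a descriptor that is still
	 * owned by the NIC, i.e. one that has not yet received a frame.
	 */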
	while (1) {
		entry = rt2x00_get_data_entry(ring);
		rxd = entry->priv;
		rt2x00_desc_read(rxd, 0, &word);

		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
			break;

		memset(&desc, 0, sizeof(desc));
		rt2x00dev->ops->lib->fill_rxdone(entry, &desc);

		hdr = (struct ieee80211_hdr *)entry->data_addr;
		header_size =
		    ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));

		/*
		 * The data behind the ieee80211 header must be
		 * aligned on a 4 byte boundary.
		 */
		align = header_size % 4;

		/*
		 * Allocate the sk_buffer, initialize it and copy
		 * all data into it.
		 */
		skb = dev_alloc_skb(desc.size + align);
		if (!skb)
			return;

		skb_reserve(skb, align);
		memcpy(skb_put(skb, desc.size), entry->data_addr, desc.size);

		/*
		 * Fill in skb descriptor
		 */
		skbdesc = get_skb_desc(skb);
		skbdesc->desc_len = entry->ring->desc_size;
		skbdesc->data_len = skb->len;
		skbdesc->desc = entry->priv;
		skbdesc->data = skb->data;
		skbdesc->ring = ring;
		skbdesc->entry = entry;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, skb, &desc);

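		/*
		 * Return ownership of the descriptor to the NIC so the
		 * entry can receive a new frame, but only while the
		 * radio is still enabled.
		 */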
		if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) {
			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
			rt2x00_desc_write(rxd, 0, word);
		}

		rt2x00_ring_index_inc(ring);
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct data_entry *entry,
		      const int tx_status, const int retry)
{
	u32 word;

	rt2x00lib_txdone(entry, tx_status, retry);

	/*
	 * Make this entry available for reuse.
	 */
	entry->flags = 0;

	rt2x00_desc_read(entry->priv, 0, &word);
	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
	rt2x00_desc_write(entry->priv, 0, word);

	rt2x00_ring_index_done_inc(entry->ring);

	/*
	 * If the data ring was full before the txdone handler
	 * we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished.
	 */
	if (!rt2x00_ring_full(entry->ring))
		ieee80211_wake_queue(rt2x00dev->hw,
				     entry->tx_status.control.queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_txdone);

/*
 * Device initialization handlers.
 */
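/*
 * Each ring is backed by a single DMA allocation that holds all
 * descriptors first, followed by all data buffers.  The macros below
 * compute, for entry __i, the address of its descriptor and the
 * virtual and DMA addresses of its data buffer within that block.
 */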
#define priv_offset(__ring, __i) \
({ \
	(__ring)->data_addr + ((__i) * (__ring)->desc_size); \
})

#define data_addr_offset(__ring, __i) \
({ \
	(__ring)->data_addr + \
	    ((__ring)->stats.limit * (__ring)->desc_size) + \
	    ((__i) * (__ring)->data_size); \
})

#define data_dma_offset(__ring, __i) \
({ \
	(__ring)->data_dma + \
	    ((__ring)->stats.limit * (__ring)->desc_size) + \
	    ((__i) * (__ring)->data_size); \
})

static int rt2x00pci_alloc_dma(struct rt2x00_dev *rt2x00dev,
			       struct data_ring *ring)
{
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev),
					       rt2x00_get_ring_size(ring),
					       &ring->data_dma);
	if (!ring->data_addr)
		return -ENOMEM;

	/*
	 * Initialize all ring entries to contain valid
	 * addresses.
	 */
	for (i = 0; i < ring->stats.limit; i++) {
		ring->entry[i].priv = priv_offset(ring, i);
		ring->entry[i].data_addr = data_addr_offset(ring, i);
		ring->entry[i].data_dma = data_dma_offset(ring, i);
	}

	return 0;
}

static void rt2x00pci_free_dma(struct rt2x00_dev *rt2x00dev,
			       struct data_ring *ring)
{
	if (ring->data_addr)
		pci_free_consistent(rt2x00dev_pci(rt2x00dev),
				    rt2x00_get_ring_size(ring),
				    ring->data_addr, ring->data_dma);
	ring->data_addr = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct data_ring *ring;
	int status;

	/*
	 * Allocate DMA
	 */
	ring_for_each(rt2x00dev, ring) {
		status = rt2x00pci_alloc_dma(rt2x00dev, ring);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      pci_dev->irq, status);
		return status;
	}

	return 0;

exit:
	rt2x00pci_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_ring *ring;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	ring_for_each(rt2x00dev, ring)
		rt2x00pci_free_dma(rt2x00dev, ring);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
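	/*
	 * Release the RF and EEPROM register caches and unmap the CSR
	 * registers.
	 */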
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr_addr) {
		iounmap(rt2x00dev->csr_addr);
		rt2x00dev->csr_addr = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);

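	/*
	 * Map the CSR registers (PCI BAR 0) and allocate the EEPROM and
	 * RF register caches.
	 */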
	rt2x00dev->csr_addr = ioremap(pci_resource_start(pci_dev, 0),
				      pci_resource_len(pci_dev, 0));
	if (!rt2x00dev->csr_addr)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

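	/*
	 * Enable bus mastering and Memory Write and Invalidate, and
	 * request a 64-bit DMA mask with a 32-bit fallback.
	 */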
	pci_set_master(pci_dev);

	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

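	/*
	 * Link the ieee80211_hw, the PCI device and the rt2x00
	 * operations together before handing the device to rt2x00lib.
	 */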
	rt2x00dev = hw->priv;
	rt2x00dev->dev = pci_dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	rt2x00pci_free_reg(rt2x00dev);

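	/*
	 * Save the PCI configuration space and put the device into the
	 * sleep state requested by the PM core.
	 */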
	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

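	/*
	 * Bring the device back to D0, re-enable it and restore the PCI
	 * configuration space before reinitializing the driver.
	 */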
	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev) ||
	    pci_restore_state(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");