1 // SPDX-License-Identifier: ISC
3 /* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> */
6 #include <linux/kernel.h>
7 #include <linux/module.h>
#include <linux/pci.h>

#include "mt76x0.h"
#include "mcu.h"
13 static int mt76x0e_start(struct ieee80211_hw
*hw
)
15 struct mt76x02_dev
*dev
= hw
->priv
;
17 mt76x02_mac_start(dev
);
18 mt76x0_phy_calibrate(dev
, true);
19 ieee80211_queue_delayed_work(dev
->mt76
.hw
, &dev
->mt76
.mac_work
,
20 MT_MAC_WORK_INTERVAL
);
21 ieee80211_queue_delayed_work(dev
->mt76
.hw
, &dev
->cal_work
,
22 MT_CALIBRATE_INTERVAL
);
23 set_bit(MT76_STATE_RUNNING
, &dev
->mphy
.state
);
28 static void mt76x0e_stop_hw(struct mt76x02_dev
*dev
)
30 cancel_delayed_work_sync(&dev
->cal_work
);
31 cancel_delayed_work_sync(&dev
->mt76
.mac_work
);
32 clear_bit(MT76_RESTART
, &dev
->mphy
.state
);
34 if (!mt76_poll(dev
, MT_WPDMA_GLO_CFG
, MT_WPDMA_GLO_CFG_TX_DMA_BUSY
,
36 dev_warn(dev
->mt76
.dev
, "TX DMA did not stop\n");
37 mt76_clear(dev
, MT_WPDMA_GLO_CFG
, MT_WPDMA_GLO_CFG_TX_DMA_EN
);
41 if (!mt76_poll(dev
, MT_WPDMA_GLO_CFG
, MT_WPDMA_GLO_CFG_RX_DMA_BUSY
,
43 dev_warn(dev
->mt76
.dev
, "TX DMA did not stop\n");
44 mt76_clear(dev
, MT_WPDMA_GLO_CFG
, MT_WPDMA_GLO_CFG_RX_DMA_EN
);
47 static void mt76x0e_stop(struct ieee80211_hw
*hw
)
49 struct mt76x02_dev
*dev
= hw
->priv
;
51 clear_bit(MT76_STATE_RUNNING
, &dev
->mphy
.state
);
56 mt76x0e_flush(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
,
57 u32 queues
, bool drop
)
61 static const struct ieee80211_ops mt76x0e_ops
= {
63 .start
= mt76x0e_start
,
65 .add_interface
= mt76x02_add_interface
,
66 .remove_interface
= mt76x02_remove_interface
,
67 .config
= mt76x0_config
,
68 .configure_filter
= mt76x02_configure_filter
,
69 .bss_info_changed
= mt76x02_bss_info_changed
,
70 .sta_state
= mt76_sta_state
,
71 .sta_pre_rcu_remove
= mt76_sta_pre_rcu_remove
,
72 .set_key
= mt76x02_set_key
,
73 .conf_tx
= mt76x02_conf_tx
,
74 .sw_scan_start
= mt76_sw_scan
,
75 .sw_scan_complete
= mt76x02_sw_scan_complete
,
76 .ampdu_action
= mt76x02_ampdu_action
,
77 .sta_rate_tbl_update
= mt76x02_sta_rate_tbl_update
,
78 .wake_tx_queue
= mt76_wake_tx_queue
,
79 .get_survey
= mt76_get_survey
,
80 .get_txpower
= mt76_get_txpower
,
81 .flush
= mt76x0e_flush
,
82 .set_tim
= mt76_set_tim
,
83 .release_buffered_frames
= mt76_release_buffered_frames
,
84 .set_coverage_class
= mt76x02_set_coverage_class
,
85 .set_rts_threshold
= mt76x02_set_rts_threshold
,
86 .get_antenna
= mt76_get_antenna
,
87 .reconfig_complete
= mt76x02_reconfig_complete
,
90 static int mt76x0e_init_hardware(struct mt76x02_dev
*dev
, bool resume
)
94 mt76x0_chip_onoff(dev
, true, false);
95 if (!mt76x02_wait_for_mac(&dev
->mt76
))
98 mt76x02_dma_disable(dev
);
99 err
= mt76x0e_mcu_init(dev
);
104 err
= mt76x02_dma_init(dev
);
109 err
= mt76x0_init_hardware(dev
);
113 mt76x02e_init_beacon_config(dev
);
115 if (mt76_chip(&dev
->mt76
) == 0x7610) {
118 mt76_clear(dev
, MT_COEXCFG0
, BIT(0));
120 val
= mt76x02_eeprom_get(dev
, MT_EE_NIC_CONF_0
);
121 if (!(val
& MT_EE_NIC_CONF_0_PA_IO_CURRENT
))
122 mt76_set(dev
, MT_XO_CTRL7
, 0xc03);
125 mt76_clear(dev
, 0x110, BIT(9));
126 mt76_set(dev
, MT_MAX_LEN_CFG
, BIT(13));
131 static int mt76x0e_register_device(struct mt76x02_dev
*dev
)
135 err
= mt76x0e_init_hardware(dev
, false);
139 err
= mt76x0_register_device(dev
);
143 set_bit(MT76_STATE_INITIALIZED
, &dev
->mphy
.state
);
149 mt76x0e_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
151 static const struct mt76_driver_ops drv_ops
= {
152 .txwi_size
= sizeof(struct mt76x02_txwi
),
153 .drv_flags
= MT_DRV_TX_ALIGNED4_SKBS
|
154 MT_DRV_SW_RX_AIRTIME
,
155 .survey_flags
= SURVEY_INFO_TIME_TX
,
156 .update_survey
= mt76x02_update_channel
,
157 .tx_prepare_skb
= mt76x02_tx_prepare_skb
,
158 .tx_complete_skb
= mt76x02_tx_complete_skb
,
159 .rx_skb
= mt76x02_queue_rx_skb
,
160 .rx_poll_complete
= mt76x02_rx_poll_complete
,
161 .sta_ps
= mt76x02_sta_ps
,
162 .sta_add
= mt76x02_sta_add
,
163 .sta_remove
= mt76x02_sta_remove
,
165 struct mt76x02_dev
*dev
;
166 struct mt76_dev
*mdev
;
169 ret
= pcim_enable_device(pdev
);
173 ret
= pcim_iomap_regions(pdev
, BIT(0), pci_name(pdev
));
177 pci_set_master(pdev
);
179 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
183 mt76_pci_disable_aspm(pdev
);
185 mdev
= mt76_alloc_device(&pdev
->dev
, sizeof(*dev
), &mt76x0e_ops
,
190 dev
= container_of(mdev
, struct mt76x02_dev
, mt76
);
191 mutex_init(&dev
->phy_mutex
);
193 mt76_mmio_init(mdev
, pcim_iomap_table(pdev
)[0]);
195 mdev
->rev
= mt76_rr(dev
, MT_ASIC_VERSION
);
196 dev_info(mdev
->dev
, "ASIC revision: %08x\n", mdev
->rev
);
198 mt76_wr(dev
, MT_INT_MASK_CSR
, 0);
200 ret
= devm_request_irq(mdev
->dev
, pdev
->irq
, mt76x02_irq_handler
,
201 IRQF_SHARED
, KBUILD_MODNAME
, dev
);
205 ret
= mt76x0e_register_device(dev
);
212 mt76_free_device(&dev
->mt76
);
217 static void mt76x0e_cleanup(struct mt76x02_dev
*dev
)
219 clear_bit(MT76_STATE_INITIALIZED
, &dev
->mphy
.state
);
220 tasklet_disable(&dev
->mt76
.pre_tbtt_tasklet
);
221 mt76x0_chip_onoff(dev
, false, false);
222 mt76x0e_stop_hw(dev
);
223 mt76_dma_cleanup(&dev
->mt76
);
224 mt76x02_mcu_cleanup(dev
);
228 mt76x0e_remove(struct pci_dev
*pdev
)
230 struct mt76_dev
*mdev
= pci_get_drvdata(pdev
);
231 struct mt76x02_dev
*dev
= container_of(mdev
, struct mt76x02_dev
, mt76
);
233 mt76_unregister_device(mdev
);
234 mt76x0e_cleanup(dev
);
235 mt76_free_device(mdev
);
239 static int mt76x0e_suspend(struct pci_dev
*pdev
, pm_message_t state
)
241 struct mt76_dev
*mdev
= pci_get_drvdata(pdev
);
242 struct mt76x02_dev
*dev
= container_of(mdev
, struct mt76x02_dev
, mt76
);
245 mt76_worker_disable(&mdev
->tx_worker
);
246 for (i
= 0; i
< ARRAY_SIZE(mdev
->phy
.q_tx
); i
++)
247 mt76_queue_tx_cleanup(dev
, mdev
->phy
.q_tx
[i
], true);
248 for (i
= 0; i
< ARRAY_SIZE(mdev
->q_mcu
); i
++)
249 mt76_queue_tx_cleanup(dev
, mdev
->q_mcu
[i
], true);
250 napi_disable(&mdev
->tx_napi
);
252 mt76_for_each_q_rx(mdev
, i
)
253 napi_disable(&mdev
->napi
[i
]);
255 mt76x02_dma_disable(dev
);
256 mt76x02_mcu_cleanup(dev
);
257 mt76x0_chip_onoff(dev
, false, false);
259 pci_enable_wake(pdev
, pci_choose_state(pdev
, state
), true);
260 pci_save_state(pdev
);
262 return pci_set_power_state(pdev
, pci_choose_state(pdev
, state
));
265 static int mt76x0e_resume(struct pci_dev
*pdev
)
267 struct mt76_dev
*mdev
= pci_get_drvdata(pdev
);
268 struct mt76x02_dev
*dev
= container_of(mdev
, struct mt76x02_dev
, mt76
);
271 err
= pci_set_power_state(pdev
, PCI_D0
);
275 pci_restore_state(pdev
);
277 mt76_worker_enable(&mdev
->tx_worker
);
279 mt76_for_each_q_rx(mdev
, i
) {
280 mt76_queue_rx_reset(dev
, i
);
281 napi_enable(&mdev
->napi
[i
]);
282 napi_schedule(&mdev
->napi
[i
]);
285 napi_enable(&mdev
->tx_napi
);
286 napi_schedule(&mdev
->tx_napi
);
288 return mt76x0e_init_hardware(dev
, true);
290 #endif /* CONFIG_PM */
292 static const struct pci_device_id mt76x0e_device_table
[] = {
293 { PCI_DEVICE(0x14c3, 0x7610) },
294 { PCI_DEVICE(0x14c3, 0x7630) },
295 { PCI_DEVICE(0x14c3, 0x7650) },
299 MODULE_DEVICE_TABLE(pci
, mt76x0e_device_table
);
300 MODULE_FIRMWARE(MT7610E_FIRMWARE
);
301 MODULE_FIRMWARE(MT7650E_FIRMWARE
);
302 MODULE_LICENSE("Dual BSD/GPL");
304 static struct pci_driver mt76x0e_driver
= {
305 .name
= KBUILD_MODNAME
,
306 .id_table
= mt76x0e_device_table
,
307 .probe
= mt76x0e_probe
,
308 .remove
= mt76x0e_remove
,
310 .suspend
= mt76x0e_suspend
,
311 .resume
= mt76x0e_resume
,
312 #endif /* CONFIG_PM */
315 module_pci_driver(mt76x0e_driver
);