}
EXPORT_SYMBOL_GPL(mt76x02_set_txinfo);
+bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update)
+{
+	struct mt76x02_tx_status stat;
+
+	if (!mt76x02_mac_load_tx_status(dev, &stat))
+		return false;
+
+	mt76x02_send_tx_status(dev, &stat, update);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
+
MODULE_LICENSE("Dual BSD/GPL");
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			     struct mt76_queue_entry *e, bool flush);
int mt76x02_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep);
+bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update);
#endif
int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
void mt76x2u_stop_queues(struct mt76x2_dev *dev);
-bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
			   struct sk_buff *skb, struct mt76_queue *q,
			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
	return skb_cow(skb, need_head);
}
-bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
-	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
-	struct mt76x02_tx_status stat;
-
-	if (!mt76x02_mac_load_tx_status(&dev->mt76, &stat))
-		return false;
-
-	mt76x02_send_tx_status(&dev->mt76, &stat, update);
-
-	return true;
-}
-
int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
			   struct sk_buff *skb, struct mt76_queue *q,
			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
static const struct mt76_driver_ops drv_ops = {
	.tx_prepare_skb = mt76x2u_tx_prepare_skb,
	.tx_complete_skb = mt76x02_tx_complete_skb,
-	.tx_status_data = mt76x2u_tx_status_data,
+	.tx_status_data = mt76x02_tx_status_data,
	.rx_skb = mt76x2_queue_rx_skb,
};
	struct mt76x2_dev *dev;