void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
void mt76x0_tx_stat(struct work_struct *work);
+void mt76x0_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb);
+
int mt76x0_dma_init(struct mt76x0_dev *dev);
void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
	return mt76x02_set_txinfo(skb, wcid, q2ep(q->hw_idx));
}
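+/*
+ * RX completion handler invoked by the shared mt76 core for each received
+ * frame.  A minimal sketch of how it is expected to be hooked up, assuming
+ * the .rx_skb callback in struct mt76_driver_ops; mt76x0_drv_ops is a
+ * hypothetical name used only for illustration:
+ *
+ *	static const struct mt76_driver_ops mt76x0_drv_ops = {
+ *		.rx_skb = mt76x0_queue_rx_skb,
+ *	};
+ */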
+void mt76x0_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb)
+{
+	struct mt76x0_dev *dev = container_of(mdev, struct mt76x0_dev, mt76);
+	void *rxwi = skb->data;
+
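+	/* Strip the hardware RX descriptor (RXWI) prepended to the frame;
+	 * rxwi still points at it for mt76x0_mac_process_rx() below.
+	 */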
+	skb_pull(skb, sizeof(struct mt76x02_rxwi));
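+	/* A zero return from mt76x0_mac_process_rx() marks the frame
+	 * as invalid; drop it here.
+	 */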
+	if (!mt76x0_mac_process_rx(dev, skb, rxwi)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
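+	/* Hand the processed frame to the shared mt76 RX path */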
+	mt76_rx(&dev->mt76, q, skb);
+}
+
void mt76x0_tx_stat(struct work_struct *work)
{
	struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,