u64_stats_update_end(&rx_stats->syncp);
drop:
- WARN_ON_ONCE(rxq->xdp_save_page);
- rxq->xdp_save_page = virt_to_page(buf_va);
+ WARN_ON_ONCE(rxq->xdp_save_va);
+ /* Save the buffer VA for reuse by mana_get_rxfrag() */
+ rxq->xdp_save_va = buf_va;
++ndev->stats.rx_dropped;
return;
}
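+/* Allocate an RX buffer (reusing one saved from an XDP drop when
+ * possible) and DMA-map its data area. Returns the buffer VA, or NULL
+ * on failure; *da receives the DMA address. The is_napi flag carries
+ * the caller's context and is unused here, presumably reserved for
+ * selecting a context-appropriate allocator later.
+ */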
+static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
+ dma_addr_t *da, bool is_napi)
+{
+ struct page *page;
+ void *va;
+
+ /* Reuse the buffer saved from an XDP drop, if available */
+ if (rxq->xdp_save_va) {
+ va = rxq->xdp_save_va;
+ rxq->xdp_save_va = NULL;
+ } else {
+ page = dev_alloc_page();
+ if (!page)
+ return NULL;
+
+ va = page_to_virt(page);
+ }
+
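+ /* Map only the data area; the leading XDP_PACKET_HEADROOM bytes
+ * stay unmapped as headroom for XDP programs.
+ */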
+ *da = dma_map_single(dev, va + XDP_PACKET_HEADROOM, rxq->datasize,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, *da)) {
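+ /* Drop the reference on the page backing va; this covers both
+ * freshly allocated and reused buffers.
+ */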
+ put_page(virt_to_head_page(va));
+ return NULL;
+ }
+
+ return va;
+}
+
+/* Allocate a new RX buffer for the rxoob and return the old buffer via
+ * @old_buf, which is left untouched if allocation fails.
+ */
+static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq,
+ struct mana_recv_buf_oob *rxoob, void **old_buf)
+{
+ dma_addr_t da;
+ void *va;
+
+ va = mana_get_rxfrag(rxq, dev, &da, true);
+
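+ /* On allocation failure, keep the current buffer in place; the
+ * caller sees *old_buf == NULL and drops the packet.
+ */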
+ if (!va)
+ return;
+
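+ /* Unmap the old buffer before handing its VA back to the caller */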
+ dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+ DMA_FROM_DEVICE);
+ *old_buf = rxoob->buf_va;
+
+ rxoob->buf_va = va;
+ rxoob->sgl[0].address = da;
+}
+
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct gdma_comp *cqe)
{
struct mana_recv_buf_oob *rxbuf_oob;
struct mana_port_context *apc;
struct device *dev = gc->dev;
- void *new_buf, *old_buf;
- struct page *new_page;
+ void *old_buf = NULL;
u32 curr, pktlen;
- dma_addr_t da;
apc = netdev_priv(ndev);
rxbuf_oob = &rxq->rx_oobs[curr];
WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
- /* Reuse XDP dropped page if available */
- if (rxq->xdp_save_page) {
- new_page = rxq->xdp_save_page;
- rxq->xdp_save_page = NULL;
- } else {
- new_page = alloc_page(GFP_ATOMIC);
- }
-
- if (new_page) {
- da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, da)) {
- __free_page(new_page);
- new_page = NULL;
- }
- }
-
- new_buf = new_page ? page_to_virt(new_page) : NULL;
-
- if (new_buf) {
- dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
- DMA_FROM_DEVICE);
-
- old_buf = rxbuf_oob->buf_va;
-
- /* refresh the rxbuf_oob with the new page */
- rxbuf_oob->buf_va = new_buf;
- rxbuf_oob->buf_dma_addr = da;
- rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
- } else {
- old_buf = NULL; /* drop the packet if no memory */
- }
+ mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf);
+ /* A failed refill leaves old_buf == NULL, in which case
+ * mana_rx_skb() drops the packet.
+ */
mana_rx_skb(old_buf, oob, rxq);
drop:
mana_deinit_cq(apc, &rxq->rx_cq);
- if (rxq->xdp_save_page)
- __free_page(rxq->xdp_save_page);
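+ /* Release the buffer saved for reuse after an XDP drop, if any */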
+ if (rxq->xdp_save_va)
+ put_page(virt_to_head_page(rxq->xdp_save_va));
for (i = 0; i < rxq->num_rx_buf; i++) {
rx_oob = &rxq->rx_oobs[i];
if (!rx_oob->buf_va)
continue;
- dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
- DMA_FROM_DEVICE);
+ dma_unmap_single(dev, rx_oob->sgl[0].address,
+ rx_oob->sgl[0].size, DMA_FROM_DEVICE);
- free_page((unsigned long)rx_oob->buf_va);
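+ /* Buffers hold a page reference; release it with put_page() */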
+ put_page(virt_to_head_page(rx_oob->buf_va));
rx_oob->buf_va = NULL;
}
kfree(rxq);
}
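+/* Fill one rx_oob with a freshly allocated, DMA-mapped buffer. Used on
+ * the queue-creation path, hence is_napi == false.
+ */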
+static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
+ struct mana_rxq *rxq, struct device *dev)
+{
+ dma_addr_t da;
+ void *va;
+
+ va = mana_get_rxfrag(rxq, dev, &da, false);
+
+ if (!va)
+ return -ENOMEM;
+
+ rx_oob->buf_va = va;
+
+ rx_oob->sgl[0].address = da;
+ rx_oob->sgl[0].size = rxq->datasize;
+ rx_oob->sgl[0].mem_key = mem_key;
+
+ return 0;
+}
+
#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
struct mana_recv_buf_oob *rx_oob;
struct device *dev = gc->dev;
- struct page *page;
- dma_addr_t da;
u32 buf_idx;
+ int ret;
WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
rx_oob = &rxq->rx_oobs[buf_idx];
memset(rx_oob, 0, sizeof(*rx_oob));
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, da)) {
- __free_page(page);
- return -ENOMEM;
- }
-
- rx_oob->buf_va = page_to_virt(page);
- rx_oob->buf_dma_addr = da;
-
rx_oob->num_sge = 1;
- rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
- rx_oob->sgl[0].size = rxq->datasize;
- rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
+
+ ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, dev);
+ if (ret)
+ return ret;
rx_oob->wqe_req.sgl = rx_oob->sgl;
rx_oob->wqe_req.num_sge = rx_oob->num_sge;
rxq->ndev = ndev;
rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
rxq->rxq_idx = rxq_idx;
- rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
rxq->rxobj = INVALID_MANA_HANDLE;
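+ /* Default to one max-size standard Ethernet frame, 64-byte aligned */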
+ rxq->datasize = ALIGN(ETH_FRAME_LEN, 64);
+
err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
if (err)
goto out;