 	struct list_head tx_free_q;
 	spinlock_t ntb_tx_free_q_lock;
 	void __iomem *tx_mw;
-	dma_addr_t tx_mw_phys;
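+	/*
+	 * tx_mw_phys/tx_mw_size describe the raw TX memory window;
+	 * tx_mw_dma_addr is the handle dma_map_resource() returns for it.
+	 */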
+	phys_addr_t tx_mw_phys;
+	size_t tx_mw_size;
+	dma_addr_t tx_mw_dma_addr;
 	unsigned int tx_index;
 	unsigned int tx_max_entry;
 	unsigned int tx_max_frame;
 	tx_size = (unsigned int)mw_size / num_qps_mw;
 	qp_offset = tx_size * (qp_num / mw_count);
 
+	qp->tx_mw_size = tx_size;
 	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
 	if (!qp->tx_mw)
 		return -EINVAL;
 	dma_cookie_t cookie;
 
 	device = chan->device;
-	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
+	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
 	buff_off = (size_t)buf & ~PAGE_MASK;
 	dest_off = (size_t)dest & ~PAGE_MASK;
 		qp->rx_dma_chan = NULL;
 	}
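+	/*
+	 * The TX memory window is MMIO behind a PCI BAR, not page-backed
+	 * memory, so map it for the DMA engine's device with
+	 * dma_map_resource() rather than using the physical address
+	 * directly. The engine writes into the window, hence
+	 * DMA_FROM_DEVICE.
+	 */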
+	if (qp->tx_dma_chan) {
+		qp->tx_mw_dma_addr =
+			dma_map_resource(qp->tx_dma_chan->device->dev,
+					 qp->tx_mw_phys, qp->tx_mw_size,
+					 DMA_FROM_DEVICE, 0);
+		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
+				      qp->tx_mw_dma_addr)) {
+			qp->tx_mw_dma_addr = 0;
+			goto err1;
+		}
+	}
+
 	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
 		qp->tx_dma_chan ? "DMA" : "CPU");
 	qp->rx_alloc_entry = 0;
 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
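+	/* Unwind the MW mapping set up above, if it succeeded. */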
+	if (qp->tx_mw_dma_addr)
+		dma_unmap_resource(qp->tx_dma_chan->device->dev,
+				   qp->tx_mw_dma_addr, qp->tx_mw_size,
+				   DMA_FROM_DEVICE, 0);
 	if (qp->tx_dma_chan)
 		dma_release_channel(qp->tx_dma_chan);
 	if (qp->rx_dma_chan)
 		 */
 		dma_sync_wait(chan, qp->last_cookie);
 		dmaengine_terminate_all(chan);
+
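+		/* All DMA on the channel is quiesced; drop the MW mapping. */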
+		dma_unmap_resource(chan->device->dev,
+				   qp->tx_mw_dma_addr, qp->tx_mw_size,
+				   DMA_FROM_DEVICE, 0);
+
 		dma_release_channel(chan);
 	}