* @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
* @dma_io_tlb_pools: List of transient swiotlb memory pools.
* @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
#ifdef CONFIG_SWIOTLB_DYNAMIC
struct list_head dma_io_tlb_pools;
spinlock_t dma_io_tlb_lock;
+ bool dma_uses_io_tlb;
#endif
/* arch specific additions */
struct dev_archdata archdata;
if (!mem)
return false;
- if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC))
+ if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
+ /* Pairs with smp_wmb() in swiotlb_find_slots() and
+ * swiotlb_dyn_alloc(), which modify the RCU lists.
+ */
+ smp_rmb();
- 		return swiotlb_find_pool(dev, paddr);
+ 		return dev->dma_uses_io_tlb && swiotlb_find_pool(dev, paddr);
+ }
return paddr >= mem->defpool.start && paddr < mem->defpool.end;
}
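
The smp_rmb() above and the smp_wmb() in swiotlb_find_slots() and swiotlb_dyn_alloc() form a message-passing pair: the writer makes the pool bookkeeping and dma_uses_io_tlb visible before the bounce-buffer address can reach another CPU through a driver-private structure, and the reader orders its load of that address before it looks at the flag and the RCU lists. Below is a minimal userspace sketch of the same pattern, using C11 fences as stand-ins for smp_wmb()/smp_rmb(); every name in it (uses_io_tlb, shared_addr, writer, reader) is invented for illustration and is not part of the swiotlb API.

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  static _Atomic bool uses_io_tlb;        /* stand-in for dev->dma_uses_io_tlb   */
  static _Atomic uintptr_t shared_addr;   /* bounce address "stored by a driver" */

  /* Models the tail of swiotlb_find_slots(): publish state, then the address. */
  static void *writer(void *arg)
  {
          (void)arg;
          atomic_store_explicit(&uses_io_tlb, true, memory_order_relaxed);
          atomic_thread_fence(memory_order_release);   /* plays the role of smp_wmb() */
          atomic_store_explicit(&shared_addr, 0x1800, memory_order_relaxed);
          return NULL;
  }

  /* Models is_swiotlb_buffer(): see the address first, then check the flag. */
  static void *reader(void *arg)
  {
          uintptr_t addr;

          (void)arg;
          while (!(addr = atomic_load_explicit(&shared_addr, memory_order_relaxed)))
                  ;                                    /* wait for a published address */
          atomic_thread_fence(memory_order_acquire);   /* plays the role of smp_rmb() */
          if (atomic_load_explicit(&uses_io_tlb, memory_order_relaxed))
                  printf("address %#lx belongs to a bounce buffer\n",
                         (unsigned long)addr);
          return NULL;
  }

  int main(void)
  {
          pthread_t w, r;

          pthread_create(&r, NULL, reader, NULL);
          pthread_create(&w, NULL, writer, NULL);
          pthread_join(w, NULL);
          pthread_join(r, NULL);
          return 0;
  }

Because the acquire fence in reader() synchronizes with the release fence in writer() once the address is observed, the flag cannot be seen as a stale false for an address that has already been published.
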
add_mem_pool(mem, pool);
- /* Pairs with smp_rmb() in swiotlb_find_pool(). */
+ /* Pairs with smp_rmb() in is_swiotlb_buffer(). */
smp_wmb();
}
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
- /* Pairs with smp_wmb() in swiotlb_find_slots() and
- * swiotlb_dyn_alloc(), which modify the RCU lists.
- */
- smp_rmb();
-
rcu_read_lock();
list_for_each_entry_rcu(pool, &mem->pools, node) {
if (paddr >= pool->start && paddr < pool->end)
#ifdef CONFIG_SWIOTLB_DYNAMIC
INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
spin_lock_init(&dev->dma_io_tlb_lock);
+ dev->dma_uses_io_tlb = false;
#endif
}
list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
- /* Pairs with smp_rmb() in swiotlb_find_pool(). */
- smp_wmb();
found:
+ dev->dma_uses_io_tlb = true;
+ /* Pairs with smp_rmb() in is_swiotlb_buffer(). */
+ smp_wmb();
+
*retpool = pool;
return index;
}
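
The placement at the found: label matters: dev->dma_uses_io_tlb is set before the smp_wmb(), so the flag update, like the earlier list_add_rcu(), is ordered before the mapped address is returned and stored wherever the driver keeps it. For comparison, here is a deliberately mis-ordered variant of the hypothetical writer() from the sketch after is_swiotlb_buffer() above (reusing its invented declarations); neither smp_wmb() nor a C11 release fence orders stores that come after it, so this version loses the guarantee.

  /* Hypothetical, for illustration only: the flag is published too late. */
  static void *broken_writer(void *arg)
  {
          (void)arg;
          atomic_thread_fence(memory_order_release);   /* smp_wmb() stand-in */
          /* Not covered by the fence: may become visible after shared_addr. */
          atomic_store_explicit(&uses_io_tlb, true, memory_order_relaxed);
          atomic_store_explicit(&shared_addr, 0x1800, memory_order_relaxed);
          return NULL;
  }

A reader that already sees the address could then still observe the flag as false and skip the pool lookup, which is exactly the window this hunk closes by setting the flag before the barrier.
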