tbl[idx % TCES_PER_PAGE] = tce;
}
-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
- unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
+ struct iommu_table *tbl, unsigned long entry)
{
- unsigned long hpa = 0;
- enum dma_data_direction dir = DMA_NONE;
+ unsigned long i;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+
+ for (i = 0; i < subpages; ++i) {
+   unsigned long hpa = 0;
+   enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
+   iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
+ }
}
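
The rewritten helper clears every hardware IOMMU entry ("subpage") backing one guest-visible TCE entry, since the guest page size (stt->page_shift) may exceed the hardware table's page size (tbl->it_page_shift). A minimal, userspace-compilable sketch of the index arithmetic; the 64K/4K page sizes and the entry value are illustrative assumptions, only the two shifts mirror the code above:

#include <stdio.h>

int main(void)
{
	/* Example numbers only: 64K guest TCE pages over a 4K IOMMU table. */
	unsigned long page_shift = 16;		/* stands in for stt->page_shift */
	unsigned long it_page_shift = 12;	/* stands in for tbl->it_page_shift */
	unsigned long entry = 5;		/* guest TCE index, arbitrary example */

	unsigned long subpages = 1UL << (page_shift - it_page_shift);
	unsigned long io_entry = entry << (page_shift - it_page_shift);

	/* Prints: entry 5 covers IOMMU entries 80..95 (16 subpages) */
	printf("entry %lu covers IOMMU entries %lu..%lu (%lu subpages)\n",
	       entry, io_entry, io_entry + subpages - 1, subpages);
	return 0;
}
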
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
break;
}
+ iommu_tce_kill(tbl, io_entry, subpages);
+
return ret;
}
break;
}
+ iommu_tce_kill(tbl, io_entry, subpages);
+
return ret;
}
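
Moving iommu_tce_kill() into the unmap/map helpers means the cache invalidation is issued where the hardware table actually changes, ranged by io_entry and subpages rather than by the guest-sized index used at the old call sites. A rough, userspace-compilable sketch of that control flow; xchg_no_kill() and kill_range() are hypothetical stand-ins for iommu_tce_xchg_no_kill() and iommu_tce_kill():

#include <stdio.h>

/* Hypothetical stand-in for iommu_tce_xchg_no_kill(): update one hardware entry. */
static int xchg_no_kill(unsigned long io_entry)
{
	printf("xchg entry %lu\n", io_entry);
	return 0;
}

/* Hypothetical stand-in for iommu_tce_kill(): one ranged cache invalidation. */
static void kill_range(unsigned long io_entry, unsigned long n)
{
	printf("kill %lu entries starting at %lu\n", n, io_entry);
}

/* Unmap one guest TCE entry: touch every subpage, then invalidate once. */
static int unmap_one_guest_entry(unsigned long entry, unsigned long subpages)
{
	unsigned long io_entry = entry * subpages;
	unsigned long i;
	int ret = 0;

	for (i = 0; i < subpages; ++i) {
		ret = xchg_no_kill(io_entry + i);
		if (ret)
			break;
	}
	/* The invalidation covers the whole backing range, even after a partial failure. */
	kill_range(io_entry, subpages);
	return ret;
}

int main(void)
{
	return unmap_one_guest_entry(5, 16);
}
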
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
- iommu_tce_kill(stit->tbl, entry, 1);
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
goto unlock_exit;
}
}
*/
if (get_user(tce, tces + i)) {
ret = H_TOO_HARD;
- goto invalidate_exit;
+ goto unlock_exit;
}
tce = be64_to_cpu(tce);
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
- goto invalidate_exit;
+ goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
- entry);
- goto invalidate_exit;
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
+ entry + i);
+ goto unlock_exit;
}
}
kvmppc_tce_put(stt, entry + i, tce);
}
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill(stit->tbl, entry, npages);
-
unlock_exit:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
continue;
if (ret == H_TOO_HARD)
- goto invalidate_exit;
+ return ret;
WARN_ON_ONCE(1);
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
-
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
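
Dropping the shared invalidate_exit labels follows from the same change: as the removed lines show, the exit-path iommu_tce_kill() was ranged by the guest-visible entry and npages, while the hardware table modified in the helpers is indexed in its own, possibly smaller, page size. A small arithmetic sketch of that mismatch, reusing the illustrative 64K-over-4K page sizes from above (the real ratio depends on stt->page_shift and tbl->it_page_shift):

#include <stdio.h>

int main(void)
{
	/* Example numbers only: 64K guest TCE pages over a 4K IOMMU table. */
	unsigned long shift_diff = 16 - 12;
	unsigned long entry = 5, npages = 2;	/* arbitrary example values */

	/* Range the removed invalidate_exit path would have invalidated. */
	printf("guest-sized kill:    %lu entries from %lu\n", npages, entry);

	/* Range actually changed in the hardware table by the helpers. */
	printf("hardware-sized kill: %lu entries from %lu\n",
	       npages << shift_diff, entry << shift_diff);
	return 0;
}
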
tbl->it_ops->tce_kill(tbl, entry, pages, true);
}
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
- unsigned long entry)
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
+ struct iommu_table *tbl, unsigned long entry)
{
- unsigned long hpa = 0;
- enum dma_data_direction dir = DMA_NONE;
+ unsigned long i;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+
+ for (i = 0; i < subpages; ++i) {
+   unsigned long hpa = 0;
+   enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
+   iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
+ }
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
break;
}
+ iommu_tce_kill_rm(tbl, io_entry, subpages);
+
return ret;
}
break;
}
+ iommu_tce_kill_rm(tbl, io_entry, subpages);
+
return ret;
}
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
- iommu_tce_kill_rm(stit->tbl, entry, 1);
-
if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
return ret;
}
}
ua = 0;
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
- goto invalidate_exit;
+ goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
- entry);
- goto invalidate_exit;
+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
+ entry + i);
+ goto unlock_exit;
}
}
kvmppc_rm_tce_put(stt, entry + i, tce);
}
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill_rm(stit->tbl, entry, npages);
-
unlock_exit:
if (!prereg)
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
continue;
if (ret == H_TOO_HARD)
- goto invalidate_exit;
+ return ret;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
-
return ret;
}