/* looks like maybe a "free flush slots" counter, the
 * faster you write to 0x100cbc the more it decreases
 */
- if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
- nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
- nvkm_rd32(device, 0x100c80), type);
- }
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+ break;
+ );
nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
/* wait for flush to be queued? */
- if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
- nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
- nvkm_rd32(device, 0x100c80), type);
- }
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+ break;
+ );
}
mutex_unlock(&nv_subdev(mmu)->mutex);
}
mutex_lock(&nv_subdev(mmu)->mutex);
nvkm_wr32(device, 0x100810, 0x00000022);
- if (!nv_wait(mmu, 0x100810, 0x00000020, 0x00000020)) {
- nv_warn(mmu, "flush timeout, 0x%08x\n",
- nvkm_rd32(device, 0x100810));
- }
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100810) & 0x00000020)
+ break;
+ );
nvkm_wr32(device, 0x100810, 0x00000000);
mutex_unlock(&nv_subdev(mmu)->mutex);
}
struct nvkm_device *device = mmu->base.subdev.device;
nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
nvkm_wr32(device, 0x100808, 0x00000020);
- if (!nv_wait(mmu, 0x100808, 0x00000001, 0x00000001))
- nv_error(mmu, "timeout: 0x%08x\n", nvkm_rd32(device, 0x100808));
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100808) & 0x00000001)
+ break;
+ );
nvkm_wr32(device, 0x100808, 0x00000000);
}
}
nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
- if (!nv_wait(mmu, 0x100c80, 0x00000001, 0x00000000))
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+ break;
+ ) < 0)
nv_error(mmu, "vm flush timeout: engine %d\n", vme);
}
mutex_unlock(&nv_subdev(mmu)->mutex);