return pt;
}
-static struct i915_page_directory *__alloc_pd(void)
+static struct i915_page_directory *__alloc_pd(size_t sz)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
+ pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return NULL;
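Passing the size through is what lets a later hunk trim a directory's trailing entry[] array down to only the slots a level will actually use. A minimal, self-contained sketch of the offsetof() sizing idiom (the struct here is a stand-in, not the driver's i915_page_directory):

	#include <stdio.h>
	#include <stddef.h>

	struct pd {
		int used;
		void *entry[512];	/* must stay the final member for trimming to be safe */
	};

	int main(void)
	{
		/* A full directory vs. one trimmed to its first 4 entries. */
		printf("full:    %zu bytes\n", sizeof(struct pd));
		printf("trimmed: %zu bytes\n", offsetof(struct pd, entry[4]));
		return 0;
	}

Accessing entry[4] or beyond in the trimmed allocation would be out of bounds, which is why the trimming caller below guards the entry count with a GEM_BUG_ON().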
{
struct i915_page_directory *pd;
- pd = __alloc_pd();
+ pd = __alloc_pd(sizeof(*pd));
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
struct i915_page_dma * const to,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
- GEM_BUG_ON(atomic_read(px_used(pd)) > 512);
+ GEM_BUG_ON(atomic_read(px_used(pd)) > ARRAY_SIZE(pd->entry));
atomic_inc(px_used(pd));
pd->entry[idx] = to;
return end - start;
}
-static void gen8_free_page_tables(struct i915_address_space *vm,
- struct i915_page_directory *pd)
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ int count, int lvl)
{
- int i;
-
- for (i = 0; i < I915_PDES; i++) {
- if (pd->entry[i])
- free_pd(vm, pd->entry[i]);
- }
-}
-
-static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
- struct i915_page_directory *pdp)
-{
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- int i;
-
- for (i = 0; i < pdpes; i++) {
- if (!pdp->entry[i])
- continue;
-
- gen8_free_page_tables(vm, pdp->entry[i]);
- free_pd(vm, pdp->entry[i]);
- }
-
- free_px(vm, pdp);
-}
-
-static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
-{
- struct i915_page_directory * const pml4 = ppgtt->pd;
- int i;
-
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- struct i915_page_directory *pdp = i915_pdp_entry(pml4, i);
-
-		if (!pdp)
-			continue;
-
-		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
+	if (lvl) {
+		void **pde = pd->entry;
+
+		do {
+			if (!*pde)
+				continue;
+
+			__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+		} while (pde++, --count);
}
- free_px(&ppgtt->vm, pml4);
+ free_px(vm, pd);
}
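The loop above leans on a C subtlety worth flagging: continue inside a do/while jumps to the controlling expression, so (pde++, --count) still advances the cursor and the counter even when a NULL entry is skipped. A standalone sketch of the idiom, with hypothetical names:

	#include <stdio.h>

	int main(void)
	{
		int vals[] = { 1, 0, 3, 0 };	/* 0 stands in for a NULL entry */
		int *cur = vals;
		int count = 4;

		do {
			if (!*cur)
				continue;	/* lands on (cur++, --count), not past it */
			printf("visit %d\n", *cur);
		} while (cur++, --count);	/* comma expr: advance cursor, then test count */

		return 0;	/* prints "visit 1" then "visit 3" */
	}

The do/while form keeps the advance in exactly one place for both the skip path and the visit path, which is what makes the early continue safe.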
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct drm_i915_private *i915 = vm->i915;
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(i915))
+ if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (i915_vm_is_4lvl(vm))
- gen8_ppgtt_cleanup_4lvl(ppgtt);
- else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd);
-
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd,
+ vm->total >> __gen8_pte_shift(vm->top),
+ vm->top);
free_scratch(vm);
}
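The count argument replaces the old 3lvl/4lvl split with arithmetic: the top level's entry count falls out of the address-space size. A worked check, assuming __gen8_pte_shift(lvl) expands to the usual gen8 layout of 12 + 9 * lvl (4K pages, 512 entries per level):

	/*
	 * 4-level, 48-bit vm: (1ull << 48) >> (12 + 9 * 3) = 1 << 9 = 512 PML4 entries
	 * 3-level, 32-bit vm: (1ull << 32) >> (12 + 9 * 2) = 1 << 2 =   4 PDP entries
	 */

One recursive walk therefore covers both layouts; only the top level needs a computed width, since every level below it is a full GEN8_PDES entries.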
struct i915_page_directory *pdp = ppgtt->pd;
struct i915_page_directory *pd;
u64 start = 0, length = ppgtt->vm.total;
- u64 from = start;
unsigned int pdpe;
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
pd = alloc_pd(vm);
if (IS_ERR(pd))
- goto unwind;
+ return PTR_ERR(pd);
fill_px(pd, vm->scratch[1].encode);
set_pd_entry(pdp, pdpe, pd);
}
return 0;
-
-unwind:
- gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
- atomic_set(px_used(pdp), 0);
- return -ENOMEM;
}
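Dropping the unwind: label is only safe because __gen8_ppgtt_cleanup() skips NULL entries, so a partially populated top level can be handed, as-is, to the caller's common error path. A comment-form sketch of the assumed caller flow (the enclosing create function is outside this excerpt; err_free_pd is the label visible further down):

	/*
	 *	err = preallocate_top_level(ppgtt);	// hypothetical caller shape
	 *	if (err)
	 *		goto err_free_pd;		// pdp may be half-filled here
	 *	...
	 * err_free_pd:
	 *	__gen8_ppgtt_cleanup(...);		// NULL slots are simply skipped
	 */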
static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
- pd = alloc_pd(vm);
- if (IS_ERR(pd))
- return pd;
+ pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
+
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
return pd;
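One asymmetry worth a note: when setup_page_dma() fails, the unwind is a bare kfree() rather than free_pd()/free_px(), because at that point only the CPU-side allocation from __alloc_pd() exists and there is no DMA page to release. In comment form:

	/*
	 * Unwind sketch:
	 *   __alloc_pd() fails     -> nothing to free, return ERR_PTR
	 *   setup_page_dma() fails -> kfree() the bare struct only
	 * Only after both steps succeed may the pd go through the
	 * normal free_pd()/free_px() teardown.
	 */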
return ppgtt;
err_free_pd:
- free_px(&ppgtt->vm, ppgtt->pd);
+ __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+ ppgtt->vm.total >> __gen8_pte_shift(ppgtt->vm.top),
+ ppgtt->vm.top);
err_free_scratch:
free_scratch(&ppgtt->vm);
err_free:
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
- ppgtt->base.pd = __alloc_pd();
+ ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
if (!ppgtt->base.pd) {
err = -ENOMEM;
goto err_free;