static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_pde_t scratch_pde;

-	scratch_pde = gen8_pde_encode(px_dma(ppgtt->scratch_pt),
-				      I915_CACHE_LLC);
+	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->dev, pd, scratch_pde);
}
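/*
 * Illustrative sketch, not part of the patch: gen8_initialize_pd() can
 * take the scratch page table straight from the vm because every
 * address space now owns a single scratch PT, and a freshly
 * initialized page directory points all of its entries at it. The
 * userspace model below (hypothetical names pd_init, NUM_PDES) shows
 * the fill-with-scratch pattern that fill_px() performs on real pages.
 */
#include <stdio.h>

#define NUM_PDES 512

struct page_table { int dummy; };

static struct page_table scratch_pt;	/* one per address space */

static void pd_init(struct page_table *pd[NUM_PDES])
{
	for (int i = 0; i < NUM_PDES; i++)
		pd[i] = &scratch_pt;	/* every PDE maps to scratch */
}

int main(void)
{
	struct page_table *pd[NUM_PDES];

	pd_init(pd);
	printf("pd[0] is scratch: %d\n", pd[0] == &scratch_pt);
	return 0;
}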
		free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
	}

-	free_pd(ppgtt->base.dev, ppgtt->scratch_pd);
-	free_pt(ppgtt->base.dev, ppgtt->scratch_pt);
+	free_pd(vm->dev, vm->scratch_pd);
+	free_pt(vm->dev, vm->scratch_pt);
}
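/*
 * Illustrative sketch, not part of the patch: the cleanup path above
 * still derives the i915_hw_ppgtt (for ppgtt->pdp) from the address
 * space embedded in it, which is what container_of() does. A minimal
 * userspace version of that macro, with hypothetical struct names:
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { int id; };

struct hw_ppgtt {
	int node;
	struct address_space base;	/* embedded, like ppgtt->base */
};

int main(void)
{
	struct hw_ppgtt ppgtt = { .node = 7, .base = { .id = 1 } };
	struct address_space *vm = &ppgtt.base;

	/* Walk back from the embedded member to its container. */
	struct hw_ppgtt *back = container_of(vm, struct hw_ppgtt, base);

	printf("node = %d\n", back->node);	/* prints 7 */
	return 0;
}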
		/* Don't reallocate page tables */
		if (pt) {
			/* Scratch is never allocated this way */
-			WARN_ON(pt == ppgtt->scratch_pt);
+			WARN_ON(pt == ppgtt->base.scratch_pt);
			continue;
		}
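/*
 * Illustrative sketch, not part of the patch: page tables are
 * allocated lazily, so a populated slot must hold a real table and
 * never the shared scratch one; the WARN_ON above guards that
 * invariant. Userspace model with a hypothetical alloc_tables():
 */
#include <assert.h>
#include <stdlib.h>

#define NUM_PDES 512

struct page_table { int dummy; };

static struct page_table scratch_pt;

static int alloc_tables(struct page_table *pd[NUM_PDES], int from, int to)
{
	for (int i = from; i < to; i++) {
		if (pd[i]) {
			/* Already populated: must never be scratch. */
			assert(pd[i] != &scratch_pt);
			continue;
		}
		pd[i] = malloc(sizeof(*pd[i]));
		if (!pd[i])
			return -1;
	}
	return 0;
}

int main(void)
{
	struct page_table *pd[NUM_PDES] = { 0 };	/* NULL = unallocated */

	return alloc_tables(pd, 0, 4) ? 1 : 0;
}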
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
-	ppgtt->scratch_pt = alloc_pt(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
+	ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
+	if (IS_ERR(ppgtt->base.scratch_pt))
+		return PTR_ERR(ppgtt->base.scratch_pt);

-	ppgtt->scratch_pd = alloc_pd(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pd))
-		return PTR_ERR(ppgtt->scratch_pd);
+	ppgtt->base.scratch_pd = alloc_pd(ppgtt->base.dev);
+	if (IS_ERR(ppgtt->base.scratch_pd))
+		return PTR_ERR(ppgtt->base.scratch_pd);

-	gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
-	gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
+	gen8_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
+	gen8_initialize_pd(&ppgtt->base, ppgtt->base.scratch_pd);

	ppgtt->base.start = 0;
	ppgtt->base.total = 1ULL << 32;
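/*
 * Illustrative sketch, not part of the patch: alloc_pt()/alloc_pd()
 * follow the kernel's ERR_PTR convention, encoding a negative errno
 * in the returned pointer so callers check IS_ERR() instead of NULL.
 * A userspace approximation of those macros:
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *alloc_pt(void)
{
	void *pt = malloc(64);

	return pt ? pt : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *pt = alloc_pt();

	if (IS_ERR(pt)) {
		printf("alloc failed: %ld\n", PTR_ERR(pt));
		return 1;
	}
	free(pt);
	return 0;
}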
	uint32_t pte, pde, temp;
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

-	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
		u32 expected;
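/*
 * Illustrative sketch, not part of the patch: the dump code above
 * encodes the scratch page once and then compares live PTEs against
 * that value, since an entry still equal to scratch_pte maps nothing
 * interesting. Userspace model; pte_encode() here is a stand-in, not
 * the i915 callback:
 */
#include <stdint.h>
#include <stdio.h>

#define GEN6_PTES 1024

static uint32_t pte_encode(uint32_t addr)
{
	return addr | 1;	/* low bit as a "valid" flag */
}

int main(void)
{
	uint32_t scratch_pte = pte_encode(0x1000);
	uint32_t ptes[GEN6_PTES];
	int used = 0;

	for (int i = 0; i < GEN6_PTES; i++)
		ptes[i] = scratch_pte;		/* all unused initially */
	ptes[3] = pte_encode(0x5000);		/* map one page */

	for (int i = 0; i < GEN6_PTES; i++)
		if (ptes[i] != scratch_pte)
			used++;
	printf("%d PTE(s) in use\n", used);	/* prints 1 */
	return 0;
}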
	 * tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
-		if (pt != ppgtt->scratch_pt) {
+		if (pt != vm->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}
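/*
 * Illustrative sketch, not part of the patch: each real page table
 * tracks its live entries in a used_ptes bitmap, and the WARN_ON
 * above fires if a non-scratch table turns out to be completely
 * empty. Minimal userspace bitmap with a hand-rolled bitmap_empty():
 */
#include <stdbool.h>
#include <stdio.h>

#define GEN6_PTES	1024
#define BITS_PER_LONG	((int)(8 * sizeof(unsigned long)))
#define BITMAP_LONGS	((GEN6_PTES + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_used(unsigned long *map, int n)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static bool bitmap_empty(const unsigned long *map, int longs)
{
	for (int i = 0; i < longs; i++)
		if (map[i])
			return false;
	return true;
}

int main(void)
{
	unsigned long used_ptes[BITMAP_LONGS] = { 0 };

	set_used(used_ptes, 42);	/* one PTE allocated */
	printf("empty: %d\n", bitmap_empty(used_ptes, BITMAP_LONGS));
	return 0;
}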
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde];

-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+		ppgtt->pd.page_table[pde] = vm->scratch_pt;
		free_pt(vm->dev, pt);
	}
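/*
 * Illustrative sketch, not part of the patch: the unwind loop above
 * rolls back a partially failed allocation by pointing each touched
 * PDE back at scratch before freeing the new table, so the directory
 * never holds a dangling pointer. Userspace model with a hypothetical
 * newly_allocated[] flag array standing in for the new_page_tables
 * bitmap:
 */
#include <stdlib.h>

#define I915_PDES 512

struct page_table { int dummy; };

static struct page_table scratch_pt;

static void unwind(struct page_table *pd[I915_PDES],
		   const unsigned char newly_allocated[I915_PDES])
{
	for (int pde = 0; pde < I915_PDES; pde++) {
		struct page_table *pt = pd[pde];

		if (!newly_allocated[pde])
			continue;
		pd[pde] = &scratch_pt;	/* restore scratch, then free */
		free(pt);
	}
}

int main(void)
{
	struct page_table *pd[I915_PDES];
	unsigned char newly_allocated[I915_PDES] = { 0 };

	for (int i = 0; i < I915_PDES; i++)
		pd[i] = &scratch_pt;
	pd[5] = malloc(sizeof(struct page_table));
	newly_allocated[5] = 1;

	unwind(pd, newly_allocated);	/* pd[5] points at scratch again */
	return pd[5] == &scratch_pt ? 0 : 1;
}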
	struct i915_page_table *pt;
	uint32_t pde;
-
	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, ppgtt, pde) {
-		if (pt != ppgtt->scratch_pt)
+		if (pt != vm->scratch_pt)
			free_pt(ppgtt->base.dev, pt);
	}

-	free_pt(ppgtt->base.dev, ppgtt->scratch_pt);
+	free_pt(vm->dev, vm->scratch_pt);
}
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));

-	ppgtt->scratch_pt = alloc_pt(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
+	ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
+	if (IS_ERR(ppgtt->base.scratch_pt))
+		return PTR_ERR(ppgtt->base.scratch_pt);

-	gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+	gen6_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);

alloc:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,

	return 0;

err_out:
-	free_pt(ppgtt->base.dev, ppgtt->scratch_pt);
+	free_pt(ppgtt->base.dev, ppgtt->base.scratch_pt);
	return ret;
}
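/*
 * Illustrative sketch, not part of the patch: the alloc:/err_out:
 * labels above follow the common kernel goto pattern of "try, reclaim
 * space, retry, and free everything on failure". The userspace model
 * below uses hypothetical reserve_range() and evict_something()
 * helpers; the real code reserves GTT space with
 * drm_mm_insert_node_in_range_generic().
 */
#include <errno.h>
#include <stdlib.h>

static void *scratch_pt;	/* stands in for ppgtt->base.scratch_pt */

static int reserve_range(int *attempts)
{
	/* Fail with -ENOSPC on the first try, succeed after eviction. */
	return (*attempts)++ == 0 ? -ENOSPC : 0;
}

static int evict_something(void)
{
	return 0;	/* pretend eviction always frees enough space */
}

static int allocate_page_directories(void)
{
	int attempts = 0;
	int ret;

	scratch_pt = malloc(64);
	if (!scratch_pt)
		return -ENOMEM;

alloc:
	ret = reserve_range(&attempts);
	if (ret == -ENOSPC) {
		ret = evict_something();
		if (ret)
			goto err_out;
		goto alloc;
	}
	if (ret)
		goto err_out;
	return 0;

err_out:
	free(scratch_pt);	/* mirrors free_pt() on the error path */
	scratch_pt = NULL;
	return ret;
}

int main(void)
{
	return allocate_page_directories() ? 1 : 0;
}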
	uint32_t pde, temp;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)