return 0;
}
-static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+static void unmap_and_free_pt(struct i915_page_table *pt,
struct drm_device *dev)
{
	if (WARN_ON(!pt->page))
		return;

	__free_page(pt->page);
	kfree(pt);
}
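A minimal usage sketch of the alloc/free pair above, assuming only what these hunks show: alloc_pt_single() returns an ERR_PTR()-encoded error, checked the same way alloc_pt_range() does below. The surrounding function and the `dev` local are hypothetical context, not part of the patch.

	struct i915_page_table *pt = alloc_pt_single(dev);

	if (IS_ERR(pt))
		return PTR_ERR(pt);

	/* ... populate and use the page table ... */

	unmap_and_free_pt(pt, dev);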
-static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
{
- struct i915_page_table_entry *pt;
+ struct i915_page_table *pt;
const size_t count = INTEL_INFO(dev)->gen >= 8 ?
GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
*
* Return: 0 if allocation succeeded.
*/
-static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
+static int alloc_pt_range(struct i915_page_directory *pd, uint16_t pde, size_t count,
struct drm_device *dev)
{
int i, ret;
return -EINVAL;
for (i = pde; i < pde + count; i++) {
- struct i915_page_table_entry *pt = alloc_pt_single(dev);
+ struct i915_page_table *pt = alloc_pt_single(dev);
if (IS_ERR(pt)) {
ret = PTR_ERR(pt);
return ret;
}
-static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+static void unmap_and_free_pd(struct i915_page_directory *pd)
{
	if (pd->page) {
		__free_page(pd->page);
		kfree(pd);
	}
}
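For readers following the rename, a rough sketch of the two structures as this patch uses them. Only the fields touched in these hunks (page, daddr, page_table[]) are shown; the real definitions in i915_gem_gtt.h carry more state.

	struct i915_page_table {
		struct page *page;	/* backing page holding the PTEs */
		dma_addr_t daddr;	/* DMA address of that page */
	};

	struct i915_page_directory {
		struct page *page;	/* backing page holding the PDEs */
		struct i915_page_table *page_table[I915_PDES];	/* one PT per PDE */
	};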
-static struct i915_page_directory_entry *alloc_pd_single(void)
+static struct i915_page_directory *alloc_pd_single(void)
{
- struct i915_page_directory_entry *pd;
+ struct i915_page_directory *pd;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct i915_page_directory_entry *pd;
- struct i915_page_table_entry *pt;
+ struct i915_page_directory *pd;
+ struct i915_page_table *pt;
struct page *page_table;
if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
break;
if (pt_vaddr == NULL) {
- struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
- struct i915_page_table_entry *pt = pd->page_table[pde];
+ struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
+ struct i915_page_table *pt = pd->page_table[pde];
struct page *page_table = pt->page;
pt_vaddr = kmap_atomic(page_table);
}
}
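Context for the kmap_atomic() call above: an atomic mapping must be released with kunmap_atomic() before the next table is mapped. A sketch of the pattern this loop relies on; the unmap side falls outside the hunk.

	pt_vaddr = kmap_atomic(page_table);
	/* ... write gen8 PTEs through pt_vaddr ... */
	kunmap_atomic(pt_vaddr);	/* release before mapping the next PT */
	pt_vaddr = NULL;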
-static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
{
int i;
PCI_DMA_BIDIRECTIONAL);
for (j = 0; j < I915_PDES; j++) {
- struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
- struct i915_page_table_entry *pt;
+ struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
+ struct i915_page_table *pt;
dma_addr_t addr;
		if (WARN_ON(!pd->page_table[j]))
			continue;
const int pt)
{
dma_addr_t pt_addr;
- struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
- struct i915_page_table_entry *ptab = pdir->page_table[pt];
+ struct i915_page_directory *pdir = ppgtt->pdp.page_directory[pd];
+ struct i915_page_table *ptab = pdir->page_table[pt];
struct page *p = ptab->page;
int ret;
* will never need to touch the PDEs again.
*/
for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
- struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+ struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
gen8_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(pd->page);
for (j = 0; j < I915_PDES; j++) {
- struct i915_page_table_entry *pt = pd->page_table[j];
+ struct i915_page_table *pt = pd->page_table[j];
dma_addr_t addr = pt->daddr;
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
I915_CACHE_LLC);
}
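The loop above maps each page directory once and rewrites all of its PDEs. gen8_pde_encode() packs a page table's DMA address plus validity/cache bits into a gen8_pde_t; an illustrative sketch of what that amounts to, assuming the usual present/writable low bits (the authoritative encoding is gen8_pde_encode() itself):

	/* Illustrative only: address in the upper bits, valid + writable
	 * flags in the low bits. Real flag values live in gen8_pde_encode(). */
	gen8_pde_t pde = addr | _PAGE_PRESENT | _PAGE_RW;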
/* Write the PDE at index @pde in the page directory @pd so that it points to the page table @pt */
-static void gen6_write_pde(struct i915_page_directory_entry *pd,
- const int pde, struct i915_page_table_entry *pt)
+static void gen6_write_pde(struct i915_page_directory *pd,
+ const int pde, struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
struct i915_hw_ppgtt *ppgtt =
/* Write the PDE for every page table found in the ppgtt structure into
 * successive slots of the page directory. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
- struct i915_page_directory_entry *pd,
+ struct i915_page_directory *pd,
uint32_t start, uint32_t length)
{
- struct i915_page_table_entry *pt;
+ struct i915_page_table *pt;
uint32_t pde, temp;
gen6_for_each_pde(pt, pd, start, length, temp, pde)
}
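gen6_for_each_pde() (used here and again in gen6_scratch_va_range below) walks every page table that the [start, start + length) range touches. A rough paraphrase of one iteration, not the macro's literal expansion; names mirror the macro arguments:

	for (pde = start >> GEN6_PDE_SHIFT; length > 0; pde++) {
		pt = pd->page_table[pde];
		/* clamp the step so start never crosses a PT boundary */
		temp = min_t(uint32_t,
			     ALIGN(start + 1, 1 << GEN6_PDE_SHIFT) - start,
			     length);
		/* ... loop body sees pt, pde, start, temp ... */
		start += temp;
		length -= temp;
	}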
static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table_entry *pt)
+ struct i915_page_table *pt)
{
gen6_pte_t *pt_vaddr, scratch_pte;
int i;
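The body of gen6_initialize_pt is cut off by the hunk; judging from the declarations above, it maps the table's backing page and points every PTE at scratch, along these lines (a sketch, not the patch's literal body):

	pt_vaddr = kmap_atomic(pt->page);
	for (i = 0; i < GEN6_PTES; i++)
		pt_vaddr[i] = scratch_pte;	/* point every slot at scratch */
	kunmap_atomic(pt_vaddr);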
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- struct i915_page_table_entry *pt;
+ struct i915_page_table *pt;
const uint32_t start_save = start, length_save = length;
uint32_t pde, temp;
int ret;
unwind_out:
for_each_set_bit(pde, new_page_tables, I915_PDES) {
- struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
+ struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
unmap_and_free_pt(pt, vm->dev);
int i;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
- struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
+ struct i915_page_table *pt = ppgtt->pd.page_table[i];
if (pt != ppgtt->scratch_pt)
unmap_and_free_pt(pt, ppgtt->base.dev);
static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint64_t start, uint64_t length)
{
- struct i915_page_table_entry *unused;
+ struct i915_page_table *unused;
uint32_t pde, temp;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
DECLARE_EVENT_CLASS(i915_page_table_entry_update,
TP_PROTO(struct i915_address_space *vm, u32 pde,
- struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits),
TP_STRUCT__entry(
DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
TP_PROTO(struct i915_address_space *vm, u32 pde,
- struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits)
);
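DEFINE_EVENT() generates a trace_i915_page_table_entry_map() helper for callers. A hedged example of invoking it from a map path; the `first` and `count` locals are illustrative, not taken from the patch.

	/* first = index of the first PTE touched, count = how many */
	trace_i915_page_table_entry_map(vm, pde, pt, first, count, GEN6_PTES);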