struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* FIXME: assumes fb at stolen base which may not be true */
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
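/* Number of pages covered by the framebuffer mapping */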
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
struct gtt_range *backing;
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
- if (backing)
- return backing;
+ if (backing) {
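+ /* Stolen memory has no shmem backing, so wrap it in a private GEM object;
+    on failure give the range back and fall through to normal GEM memory */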
+ if (drm_gem_private_object_init(dev, &backing->gem, aligned_size) == 0)
+ return backing;
+ psb_gtt_free_range(dev, backing);
+ }
/* Next try using GEM host memory */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb(gem)", 0);
if (backing == NULL)
	return NULL;

static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
					       struct drm_file *file_priv,
					       unsigned int *handle)
{
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
- if (r->stolen)
- return -EOPNOTSUPP;
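+ /* Stolen-backed framebuffers now carry a GEM object too, so a handle can
+    be created for them like any other buffer */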
return drm_gem_handle_create(file_priv, &r->gem, handle);
}
kfree(list->map);
list->map = NULL;
}
- drm_gem_object_release(obj);
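+ /* Use the wrapper so GEM objects without shmem backing (stolen memory)
+    are released safely as well */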
+ drm_gem_object_release_wrap(obj);
/* This must occur last as it frees up the memory of the GEM object */
psb_gtt_free_range(obj->dev, gtt);
}
unsigned long pfn;
pgoff_t page_offset;
struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
obj = vma->vm_private_data; /* GEM object */
dev = obj->dev;
+ dev_priv = dev->dev_private;
r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
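/* Page relative to the VMA start - we must calculate this ourselves
   because vmf->pgoff is the fake GEM mmap offset */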
page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
>> PAGE_SHIFT;
/* CPU view of the page, don't go via the GART for CPU writes */
- pfn = page_to_phys(r->pages[page_offset]) >> PAGE_SHIFT;
+ if (r->stolen)
+ pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+ else
+ pfn = page_to_pfn(r->pages[page_offset]);
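/* Insert the raw pfn: works both for stolen memory, which has no struct
   page behind it, and for shmem-backed GEM pages */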
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
fail: