do {
unsigned num_pages = ttm->num_pages - pinned;
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
- struct page **pages = ttm->pages + pinned;
+ struct page **p = pages + pinned;
+ struct amdgpu_ttm_gup_task_list guptask;
+
+ guptask.task = current;
+ spin_lock(&gtt->guptasklock);
+ list_add(&guptask.list, &gtt->guptasks);
+ spin_unlock(&gtt->guptasklock);
+
- r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, p, NULL);
++ r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+
+ spin_lock(&gtt->guptasklock);
+ list_del(&guptask.list);
+ spin_unlock(&gtt->guptasklock);
- r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
if (r < 0)
goto release_pages;
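Note: the hunk above does two things at once. It moves to the get_user_pages()
signature that no longer takes an explicit task/mm pair (the call now implies
current/current->mm), and it brackets the call with a small registry so other
threads can see which tasks are mid-GUP. Below is a minimal sketch of that
registry pattern, assuming a hypothetical struct my_gtt that carries the
guptasklock/guptasks fields used above; it is illustrative, not the driver's
actual code.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical container standing in for the driver's gtt object. */
struct my_gtt {
	spinlock_t guptasklock;
	struct list_head guptasks;
};

struct my_gup_task_entry {
	struct list_head list;
	struct task_struct *task;
};

static long my_tracked_gup(struct my_gtt *gtt, unsigned long start,
			   unsigned long nr_pages, int write,
			   struct page **pages)
{
	struct my_gup_task_entry entry = { .task = current };
	long r;

	/* Publish ourselves before the potentially long-running GUP... */
	spin_lock(&gtt->guptasklock);
	list_add(&entry.list, &gtt->guptasks);
	spin_unlock(&gtt->guptasklock);

	/* get_user_pages() may fault and sleep, so it runs unlocked;
	 * signature as of the kernel this patch targets. */
	r = get_user_pages(start, nr_pages, write, 0, pages, NULL);

	/* ...and withdraw once the pages are (or failed to be) pinned. */
	spin_lock(&gtt->guptasklock);
	list_del(&entry.list);
	spin_unlock(&gtt->guptasklock);

	return r;
}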
WARN_ON(omap_obj->paddr_cnt > 0);
- /* don't free externally allocated backing memory */
- if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
- if (omap_obj->pages)
+ if (omap_obj->pages) {
+ if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
+ kfree(omap_obj->pages);
+ else
omap_gem_detach_pages(obj);
+ }
- if (!is_shmem(obj)) {
- dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
- omap_obj->paddr);
- } else if (omap_obj->vaddr) {
- vunmap(omap_obj->vaddr);
- }
+ if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
- dma_free_writecombine(dev->dev, obj->size,
- omap_obj->vaddr, omap_obj->paddr);
++ dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
++ omap_obj->paddr);
+ } else if (omap_obj->vaddr) {
+ vunmap(omap_obj->vaddr);
+ } else if (obj->import_attach) {
+ drm_prime_gem_destroy(obj, omap_obj->sgt);
}
- /* don't free externally allocated syncobj */
- if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
- kfree(omap_obj->sync);
+ kfree(omap_obj->sync);
drm_gem_object_release(obj);
mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
}
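Note: the free path above now dispatches on how the backing memory was
obtained instead of on the old EXT_MEM/EXT_SYNC special cases: a dma-buf
import only owns the page array (kfree()), shmem-backed objects go through
the detach path, DMA-API allocations return through dma_free_wc(), and a
bare vmap()ed vaddr is vunmap()ed. A minimal sketch of that dispatch, with
hypothetical names mirroring the flags above:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define MY_BO_MEM_DMABUF	0x1	/* pages imported via dma-buf */
#define MY_BO_MEM_DMA_API	0x2	/* vaddr/paddr from dma_alloc_wc() */

struct my_gem_object {
	struct device *dev;
	size_t size;
	u32 flags;
	struct page **pages;
	void *vaddr;
	dma_addr_t paddr;
};

/* Hypothetical stand-in for the shmem detach path. */
static void my_gem_detach_pages(struct my_gem_object *obj) { }

static void my_gem_free_backing(struct my_gem_object *obj)
{
	if (obj->pages) {
		if (obj->flags & MY_BO_MEM_DMABUF)
			kfree(obj->pages);	/* array only; exporter owns the pages */
		else
			my_gem_detach_pages(obj);
	}

	if (obj->flags & MY_BO_MEM_DMA_API)
		dma_free_wc(obj->dev, obj->size, obj->vaddr, obj->paddr);
	else if (obj->vaddr)
		vunmap(obj->vaddr);		/* mapping came from vmap() */
}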
- omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
- &omap_obj->paddr,
- GFP_KERNEL);
+ /* Allocate memory if needed. */
+ if (flags & OMAP_BO_MEM_DMA_API) {
++ omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
++ &omap_obj->paddr,
++ GFP_KERNEL);
+ if (!omap_obj->vaddr)
+ goto err_release;
+ }
+
+ spin_lock(&priv->list_lock);
+ list_add(&omap_obj->mm_list, &priv->obj_list);
+ spin_unlock(&priv->list_lock);
+
return obj;
- fail:
- omap_gem_free_object(obj);
+ err_release:
+ drm_gem_object_release(obj);
+ err_free:
+ kfree(omap_obj);
return NULL;
}
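Note: the constructor above gains the usual goto-style unwinding: a failure
after object init must release the GEM object before freeing the wrapper,
while an earlier failure only frees the wrapper. A minimal sketch of that
shape, with illustrative names (my_core_init()/my_core_release() stand in
for the drm_gem_object_* calls):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define MY_BO_MEM_DMA_API	0x1

struct my_dev {
	struct device *dev;
};

struct my_obj {
	void *vaddr;
	dma_addr_t paddr;
};

/* Hypothetical stand-ins for drm_gem_object_init()/_release(). */
static int my_core_init(struct my_dev *dev, struct my_obj *obj, size_t size)
{
	return 0;
}
static void my_core_release(struct my_obj *obj) { }

static struct my_obj *my_obj_new(struct my_dev *dev, size_t size, u32 flags)
{
	struct my_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (my_core_init(dev, obj, size))
		goto err_free;

	if (flags & MY_BO_MEM_DMA_API) {
		obj->vaddr = dma_alloc_wc(dev->dev, size, &obj->paddr,
					  GFP_KERNEL);
		if (!obj->vaddr)
			goto err_release;	/* undo the init step only */
	}

	return obj;

err_release:
	my_core_release(obj);
err_free:
	kfree(obj);
	return NULL;
}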
/* Allocate memory for the VDP commands */
size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
- hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
- &dma_addr,
- GFP_KERNEL | GFP_DMA);
+ hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
- &hqvdp->hqvdp_cmd_paddr,
++ &dma_addr,
+ GFP_KERNEL | GFP_DMA);
if (!hqvdp->hqvdp_cmd) {
DRM_ERROR("Failed to allocate memory for VDP cmd\n");
return;
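Note: dma_alloc_wc()/dma_free_wc() are straight renames of
dma_alloc_writecombine()/dma_free_writecombine(); the semantics (a
write-combined, device-visible buffer) are unchanged, which is why the
hunks above only touch call sites. A minimal usage sketch with
illustrative helper names:

#include <linux/dma-mapping.h>

/* Allocate a write-combined buffer the device will read over DMA;
 * CPU writes are buffered/combined, which suits command buffers. */
static void *my_alloc_cmd_buf(struct device *dev, size_t size,
			      dma_addr_t *dma_addr)
{
	void *vaddr = dma_alloc_wc(dev, size, dma_addr, GFP_KERNEL);

	if (!vaddr)
		return NULL;	/* caller reports the failure */
	return vaddr;
}

static void my_free_cmd_buf(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_addr)
{
	dma_free_wc(dev, size, vaddr, dma_addr);
}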