{
dma_addr_t dma;
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev,
- PANEL_SIZE, &dma, GFP_KERNEL);
+ fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, PANEL_SIZE, &dma,
+ GFP_KERNEL);
if (!fb->fb.screen_base) {
printk(KERN_ERR "CLCD: unable to map framebuffer\n");
return -ENOMEM;
static int lpc32xx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base, fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
+ return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
static void lpc32xx_clcd_remove(struct clcd_fb *fb)
{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
+ fb->fb.fix.smem_start);
}
/*
fb->panel = netx_panel;
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, 1024*1024,
- &dma, GFP_KERNEL);
+ fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, 1024 * 1024, &dma,
+ GFP_KERNEL);
if (!fb->fb.screen_base) {
printk(KERN_ERR "CLCD: unable to map framebuffer\n");
return -ENOMEM;
int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
+ return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
void netx_clcd_remove(struct clcd_fb *fb)
{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
+ fb->fb.fix.smem_start);
}
static AMBA_AHB_DEVICE(fb, "fb", 0, 0x00104000, { NETX_IRQ_LCD }, NULL);
panel_size = ((panel->mode.xres * panel->mode.yres) * panel->bpp) / 8;
panel_size = ALIGN(panel_size, PAGE_SIZE);
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev,
- panel_size, &dma, GFP_KERNEL);
+ fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, panel_size, &dma,
+ GFP_KERNEL);
if (!fb->fb.screen_base) {
pr_err("CLCD: unable to map framebuffer\n");
int nspire_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base, fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
+ return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
void nspire_clcd_remove(struct clcd_fb *fb)
{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
+ fb->fb.fix.smem_start);
}
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
+ adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL);
if (!adev->dma_desc_pool_virt) {
ret = -ENOMEM;
goto err_free_adev;
* requires that we explicitly flush the writes
*/
mv_chan->dma_desc_pool_virt =
- dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
- &mv_chan->dma_desc_pool, GFP_KERNEL);
+ dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
+ GFP_KERNEL);
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
return 0;
/* allocate FIFO descriptor space, but only if necessary */
- bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- &bchan->fifo_phys, GFP_KERNEL);
+ bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ &bchan->fifo_phys, GFP_KERNEL);
if (!bchan->fifo_virt) {
dev_err(bdev->dev, "Failed to allocate desc fifo\n");
bam_reset_channel(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
- bchan->fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+ bchan->fifo_phys);
bchan->fifo_virt = NULL;
/* mask irq for pipe/channel */
bam_dma_terminate_all(&bdev->channels[i].vc.chan);
tasklet_kill(&bdev->channels[i].vc.task);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- bdev->channels[i].fifo_virt,
- bdev->channels[i].fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ bdev->channels[i].fifo_virt,
+ bdev->channels[i].fifo_phys);
}
tasklet_kill(&bdev->task);
if (IS_ERR(cma_obj))
return cma_obj;
- cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
- &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+ cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
if (!cma_obj->vaddr) {
dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
size);
cma_obj = to_drm_gem_cma_obj(gem_obj);
if (cma_obj->vaddr) {
- dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
} else if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
}
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
- cma_obj->vaddr, cma_obj->paddr,
- vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+ cma_obj->paddr, vma->vm_end - vma->vm_start);
if (ret)
drm_gem_vm_close(vma);
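Several of the converted mmap handlers share this shape: clear VM_PFNMAP, rewind vm_pgoff (the GEM/fbdev layers have already validated the offset), then hand the whole VMA to dma_mmap_wc(). A condensed sketch, with my_obj standing in for whichever driver object recorded vaddr/paddr at allocation time:

/* Hedged sketch of the common mmap path; my_obj is hypothetical. */
static int my_mmap(struct my_obj *obj, struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PFNMAP;	/* dma_mmap_wc() inserts real pages */
	vma->vm_pgoff = 0;		/* offset already consumed by the caller */
	return dma_mmap_wc(obj->dev, vma, obj->vaddr, obj->paddr,
			   vma->vm_end - vma->vm_start);
}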
if (!cmdbuf)
return NULL;
- cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
- GFP_KERNEL);
+ cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
+ GFP_KERNEL);
if (!cmdbuf->vaddr) {
kfree(cmdbuf);
return NULL;
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
- dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
- cmdbuf->vaddr, cmdbuf->paddr);
+ dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
+ cmdbuf->paddr);
kfree(cmdbuf);
}
kfree(omap_dmm->engines);
if (omap_dmm->refill_va)
- dma_free_writecombine(omap_dmm->dev,
- REFILL_BUFFER_SIZE * omap_dmm->num_engines,
- omap_dmm->refill_va,
- omap_dmm->refill_pa);
+ dma_free_wc(omap_dmm->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ omap_dmm->refill_va, omap_dmm->refill_pa);
if (omap_dmm->dummy_page)
__free_page(omap_dmm->dummy_page);
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
/* alloc refill memory */
- omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
- REFILL_BUFFER_SIZE * omap_dmm->num_engines,
- &omap_dmm->refill_pa, GFP_KERNEL);
+ omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ &omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
dev_err(&dev->dev, "could not allocate refill memory\n");
goto fail;
omap_gem_detach_pages(obj);
if (!is_shmem(obj)) {
- dma_free_writecombine(dev->dev, obj->size,
- omap_obj->vaddr, omap_obj->paddr);
+ dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
+ omap_obj->paddr);
} else if (omap_obj->vaddr) {
vunmap(omap_obj->vaddr);
}
/* attempt to allocate contiguous memory if we don't
* have DMM for remapping discontiguous buffers
*/
- omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
- &omap_obj->paddr, GFP_KERNEL);
+ omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
+ &omap_obj->paddr, GFP_KERNEL);
if (!omap_obj->vaddr) {
kfree(omap_obj);
cursor->height = src_h;
if (cursor->pixmap.base)
- dma_free_writecombine(cursor->dev,
- cursor->pixmap.size,
- cursor->pixmap.base,
- cursor->pixmap.paddr);
+ dma_free_wc(cursor->dev, cursor->pixmap.size,
+ cursor->pixmap.base, cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
- cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
- cursor->pixmap.size,
- &cursor->pixmap.paddr,
- GFP_KERNEL | GFP_DMA);
+ cursor->pixmap.base = dma_alloc_wc(cursor->dev,
+ cursor->pixmap.size,
+ &cursor->pixmap.paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
return;
/* Allocate clut buffer */
size = 0x100 * sizeof(unsigned short);
- cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
- GFP_KERNEL | GFP_DMA);
+ cursor->clut = dma_alloc_wc(dev, size, &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->clut) {
DRM_ERROR("Failed to allocate memory for cursor clut\n");
return &cursor->plane.drm_plane;
err_plane:
- dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+ dma_free_wc(dev, size, cursor->clut, cursor->clut_paddr);
err_clut:
devm_kfree(dev, cursor);
return NULL;
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
- base = dma_alloc_writecombine(gdp->dev,
- size, &dma_addr, GFP_KERNEL | GFP_DMA);
+ base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
/* Allocate memory for the VDP commands */
size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
- hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
- &hqvdp->hqvdp_cmd_paddr,
- GFP_KERNEL | GFP_DMA);
+ hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
+ &hqvdp->hqvdp_cmd_paddr,
+ GFP_KERNEL | GFP_DMA);
if (!hqvdp->hqvdp_cmd) {
DRM_ERROR("Failed to allocate memory for VDP cmd\n");
return;
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
- bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
}
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
- GFP_KERNEL | __GFP_NOWARN);
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
"failed to allocate buffer of size %zu\n",
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
- bo->paddr, gem->size);
+ ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ gem->size);
if (ret) {
drm_gem_vm_close(vma);
return ret;
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
- bo->base.vaddr, bo->base.paddr,
- vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
+ bo->base.paddr, vma->vm_end - vma->vm_start);
if (ret)
drm_gem_vm_close(vma);
struct host1x *host1x = cdma_to_host1x(cdma);
if (pb->phys != 0)
- dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
- pb->mapped, pb->phys);
+ dma_free_wc(host1x->dev, pb->size_bytes + 4, pb->mapped,
+ pb->phys);
pb->mapped = NULL;
pb->phys = 0;
pb->pos = 0;
/* allocate and map pushbuffer memory */
- pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
- &pb->phys, GFP_KERNEL);
+ pb->mapped = dma_alloc_wc(host1x->dev, pb->size_bytes + 4, &pb->phys,
+ GFP_KERNEL);
if (!pb->mapped)
goto fail;
size += g->words * sizeof(u32);
}
- job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
- &job->gather_copy,
- GFP_KERNEL);
+ job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ GFP_KERNEL);
if (!job->gather_copy_mapped) {
job->gather_copy_mapped = NULL;
return -ENOMEM;
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_writecombine(job->channel->dev, job->gather_copy_size,
- job->gather_copy_mapped,
- job->gather_copy);
+ dma_free_wc(job->channel->dev, job->gather_copy_size,
+ job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
return 0;
ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
- ctx->bitstream.vaddr = dma_alloc_writecombine(
- &ctx->dev->plat_dev->dev, ctx->bitstream.size,
- &ctx->bitstream.paddr, GFP_KERNEL);
+ ctx->bitstream.vaddr = dma_alloc_wc(&ctx->dev->plat_dev->dev,
+ ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
if (!ctx->bitstream.vaddr) {
v4l2_err(&ctx->dev->v4l2_dev,
"failed to allocate bitstream ringbuffer");
if (ctx->bitstream.vaddr == NULL)
return;
- dma_free_writecombine(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
- ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ dma_free_wc(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
ctx->bitstream.vaddr = NULL;
kfifo_init(&ctx->bitstream_fifo, NULL, 0);
}
* for the framebuffer if we are not using
* VRAM.
*/
- base = dma_alloc_writecombine(current_par.dev, size, &handle,
- GFP_KERNEL);
+ base = dma_alloc_wc(current_par.dev, size, &handle, GFP_KERNEL);
if (base == NULL) {
printk(KERN_ERR "acornfb: unable to allocate screen "
"memory\n");
{
dma_addr_t dma;
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
- &dma, GFP_KERNEL);
+ fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, framesize, &dma,
+ GFP_KERNEL);
if (!fb->fb.screen_base) {
pr_err("CLCD: unable to map framebuffer\n");
return -ENOMEM;
int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
+ return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
void versatile_clcd_remove_dma(struct clcd_fb *fb)
{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
+ fb->fb.fix.smem_start);
}
static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
+ return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
static void clcdfb_of_dma_remove(struct clcd_fb *fb)
{
struct fb_info *info = sinfo->info;
- dma_free_writecombine(info->device, info->fix.smem_len,
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(info->device, info->fix.smem_len, info->screen_base,
+ info->fix.smem_start);
}
/**
* ((var->bits_per_pixel + 7) / 8));
info->fix.smem_len = max(smem_len, sinfo->smem_len);
- info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
- (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(info->device, info->fix.smem_len,
+ (dma_addr_t *)&info->fix.smem_start,
+ GFP_KERNEL);
if (!info->screen_base) {
return -ENOMEM;
unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
if (offset < info->fix.smem_len) {
- return dma_mmap_writecombine(info->dev, vma, info->screen_base,
- info->fix.smem_start,
- info->fix.smem_len);
+ return dma_mmap_wc(info->dev, vma, info->screen_base,
+ info->fix.smem_start, info->fix.smem_len);
}
return -EINVAL;
/* Maximum 16bpp -> used memory is maximum x*y*2 bytes */
fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * 2;
- virt_addr = dma_alloc_writecombine(info->dev, fb_size,
- &phys_addr, GFP_KERNEL);
+ virt_addr = dma_alloc_wc(info->dev, fb_size, &phys_addr, GFP_KERNEL);
if (!virt_addr)
return -ENOMEM;
} else {
/* try to allocate memory with the classical allocator
* this has a high chance of failing on low-memory machines */
- gbe_mem = dma_alloc_writecombine(NULL, gbe_mem_size,
- &gbe_dma_addr, GFP_KERNEL);
+ gbe_mem = dma_alloc_wc(NULL, gbe_mem_size, &gbe_dma_addr,
+ GFP_KERNEL);
if (!gbe_mem) {
printk(KERN_ERR "gbefb: couldn't allocate framebuffer memory\n");
ret = -ENOMEM;
out_gbe_unmap:
arch_phys_wc_del(par->wc_cookie);
if (gbe_dma_addr)
- dma_free_writecombine(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
+ dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
out_tiles_free:
dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
(void *)gbe_tiles.cpu, gbe_tiles.dma);
gbe_turn_off();
arch_phys_wc_del(par->wc_cookie);
if (gbe_dma_addr)
- dma_free_writecombine(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
+ dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
(void *)gbe_tiles.cpu, gbe_tiles.dma);
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
}
fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
- info->screen_base = dma_alloc_writecombine(&pdev->dev, fbi->map_size,
- &fbi->map_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(&pdev->dev, fbi->map_size,
+ &fbi->map_dma, GFP_KERNEL);
if (!info->screen_base) {
dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
failed_platform_init:
- dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base,
- fbi->map_dma);
+ dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
+ fbi->map_dma);
failed_map:
iounmap(fbi->regs);
failed_ioremap:
kfree(info->pseudo_palette);
framebuffer_release(info);
- dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base,
- fbi->map_dma);
+ dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
+ fbi->map_dma);
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
int retval = 0;
dma_addr_t addr;
- fbi->screen_base = dma_alloc_writecombine(fbi->device,
- mem_len,
- &addr, GFP_DMA | GFP_KERNEL);
+ fbi->screen_base = dma_alloc_wc(fbi->device, mem_len, &addr,
+ GFP_DMA | GFP_KERNEL);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
*/
static int mx3fb_unmap_video_memory(struct fb_info *fbi)
{
- dma_free_writecombine(fbi->device, fbi->fix.smem_len,
- fbi->screen_base, fbi->fix.smem_start);
+ dma_free_wc(fbi->device, fbi->fix.smem_len, fbi->screen_base,
+ fbi->fix.smem_start);
fbi->screen_base = NULL;
mutex_lock(&fbi->mm_lock);
dev_dbg(fbi->dev, "nuc900fb_map_video_memory(fbi=%p) map_size %lu\n",
fbi, map_size);
- info->screen_base = dma_alloc_writecombine(fbi->dev, map_size,
- &map_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
+ GFP_KERNEL);
if (!info->screen_base)
return -ENOMEM;
static inline void nuc900fb_unmap_video_memory(struct fb_info *info)
{
struct nuc900fb_info *fbi = info->par;
- dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ info->screen_base, info->fix.smem_start);
}
static irqreturn_t nuc900fb_irqhandler(int irq, void *dev_id)
static int alloc_palette_ram(void)
{
- lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
- MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
+ lcdc.palette_virt = dma_alloc_wc(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
+ &lcdc.palette_phys, GFP_KERNEL);
if (lcdc.palette_virt == NULL) {
dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
return -ENOMEM;
static void free_palette_ram(void)
{
- dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
- lcdc.palette_virt, lcdc.palette_phys);
+ dma_free_wc(lcdc.fbdev->dev, MAX_PALETTE_SIZE, lcdc.palette_virt,
+ lcdc.palette_phys);
}
static int alloc_fbmem(struct omapfb_mem_region *region)
if (region->size > frame_size)
frame_size = region->size;
lcdc.vram_size = frame_size;
- lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
- lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
+ lcdc.vram_virt = dma_alloc_wc(lcdc.fbdev->dev, lcdc.vram_size,
+ &lcdc.vram_phys, GFP_KERNEL);
if (lcdc.vram_virt == NULL) {
dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
return -ENOMEM;
static void free_fbmem(void)
{
- dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
- lcdc.vram_virt, lcdc.vram_phys);
+ dma_free_wc(lcdc.fbdev->dev, lcdc.vram_size, lcdc.vram_virt,
+ lcdc.vram_phys);
}
static int setup_fbmem(struct omapfb_mem_desc *req_md)
*/
info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE);
- info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len,
- &fbi->fb_start_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(fbi->dev, info->fix.smem_len,
+ &fbi->fb_start_dma, GFP_KERNEL);
if (info->screen_base == NULL) {
ret = -ENOMEM;
goto failed_free_info;
irq = platform_get_irq(pdev, 0);
- dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ info->screen_base, info->fix.smem_start);
clk_disable(fbi->clk);
free_pages_exact(fbi->video_mem, fbi->video_mem_size);
- dma_free_writecombine(&dev->dev, fbi->dma_buff_size,
- fbi->dma_buff, fbi->dma_buff_phys);
+ dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
+ fbi->dma_buff_phys);
iounmap(fbi->mmio_base);
dev_dbg(sfb->dev, "want %u bytes for window\n", size);
- fbi->screen_base = dma_alloc_writecombine(sfb->dev, size,
- &map_dma, GFP_KERNEL);
+ fbi->screen_base = dma_alloc_wc(sfb->dev, size, &map_dma, GFP_KERNEL);
if (!fbi->screen_base)
return -ENOMEM;
struct fb_info *fbi = win->fbinfo;
if (fbi->screen_base)
- dma_free_writecombine(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
- fbi->screen_base, fbi->fix.smem_start);
+ dma_free_wc(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
+ fbi->screen_base, fbi->fix.smem_start);
}
/**
dprintk("map_video_memory(fbi=%p) map_size %u\n", fbi, map_size);
- info->screen_base = dma_alloc_writecombine(fbi->dev, map_size,
- &map_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
+ GFP_KERNEL);
if (info->screen_base) {
/* prevent initial garbage on screen */
{
struct s3c2410fb_info *fbi = info->par;
- dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ info->screen_base, info->fix.smem_start);
}
static inline void modify_gpio(void __iomem *reg,
if (off < info->fix.smem_len) {
vma->vm_pgoff += 1; /* skip over the palette */
- return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu,
- fbi->map_dma, fbi->map_size);
+ return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
+ fbi->map_size);
}
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
* of the framebuffer.
*/
fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE);
- fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
- &fbi->map_dma, GFP_KERNEL);
+ fbi->map_cpu = dma_alloc_wc(fbi->dev, fbi->map_size, &fbi->map_dma,
+ GFP_KERNEL);
if (fbi->map_cpu) {
fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t gfp)
+static inline void *dma_alloc_wc(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
+#ifndef dma_alloc_writecombine
+#define dma_alloc_writecombine dma_alloc_wc
+#endif
-static inline void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr)
+static inline void dma_free_wc(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
+#ifndef dma_free_writecombine
+#define dma_free_writecombine dma_free_wc
+#endif
-static inline int dma_mmap_writecombine(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
+static inline int dma_mmap_wc(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
{
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
+#ifndef dma_mmap_writecombine
+#define dma_mmap_writecombine dma_mmap_wc
+#endif
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
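The renamed helpers are thin wrappers that set DMA_ATTR_WRITE_COMBINE and defer to the dma_*_attrs() interface; the #ifndef guards keep the old names as aliases so remaining callers continue to build during the transition. A minimal sketch pairing the allocation and free sides, with BUF_SIZE and my_dev_setup() as placeholder names:

/* Hedged sketch: BUF_SIZE and my_dev_setup() are placeholders, not
 * part of this patch.
 */
static int my_dev_setup(struct device *dev)
{
	dma_addr_t dma;
	void *buf;

	buf = dma_alloc_wc(dev, BUF_SIZE, &dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with 'dma', write through 'buf' ... */

	dma_free_wc(dev, BUF_SIZE, buf, dma);
	return 0;
}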
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- return dma_mmap_writecombine(substream->pcm->card->dev, vma,
- runtime->dma_area,
- runtime->dma_addr,
- runtime->dma_bytes);
+ return dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
+ runtime->dma_addr, runtime->dma_bytes);
}
EXPORT_SYMBOL(pxa2xx_pcm_mmap);
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
- buf->area = dma_alloc_writecombine(pcm->card->dev, size,
- &buf->addr, GFP_KERNEL);
+ buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
buf->bytes = size;
buf = &substream->dma_buffer;
if (!buf->area)
continue;
- dma_free_writecombine(pcm->card->dev, buf->bytes,
- buf->area, buf->addr);
+ dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
buf->area = NULL;
}
}
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
- ret = dma_mmap_writecombine(substream->pcm->card->dev, vma,
- runtime->dma_area, runtime->dma_addr, runtime->dma_bytes);
+ ret = dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
+ runtime->dma_addr, runtime->dma_bytes);
pr_debug("%s: ret: %d %p %pad 0x%08x\n", __func__, ret,
runtime->dma_area,
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
- buf->area = dma_alloc_writecombine(pcm->card->dev, size,
- &buf->addr, GFP_KERNEL);
+ buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
buf->bytes = size;
if (!buf->area)
continue;
- dma_free_writecombine(pcm->card->dev, buf->bytes,
- buf->area, buf->addr);
+ dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
buf->area = NULL;
}
}
{
struct snd_pcm_runtime *runtime = substream->runtime;
- return dma_mmap_writecombine(substream->pcm->card->dev, vma,
- runtime->dma_area,
- runtime->dma_addr,
- runtime->dma_bytes);
+ return dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
+ runtime->dma_addr, runtime->dma_bytes);
}
static struct snd_pcm_ops nuc900_dma_ops = {
{
struct snd_pcm_runtime *runtime = substream->runtime;
- return dma_mmap_writecombine(substream->pcm->card->dev, vma,
- runtime->dma_area,
- runtime->dma_addr,
- runtime->dma_bytes);
+ return dma_mmap_wc(substream->pcm->card->dev, vma, runtime->dma_area,
+ runtime->dma_addr, runtime->dma_bytes);
}
static struct snd_pcm_ops omap_pcm_ops = {
buf->dev.type = SNDRV_DMA_TYPE_DEV;
buf->dev.dev = pcm->card->dev;
buf->private_data = NULL;
- buf->area = dma_alloc_writecombine(pcm->card->dev, size,
- &buf->addr, GFP_KERNEL);
+ buf->area = dma_alloc_wc(pcm->card->dev, size, &buf->addr, GFP_KERNEL);
if (!buf->area)
return -ENOMEM;
if (!buf->area)
continue;
- dma_free_writecombine(pcm->card->dev, buf->bytes,
- buf->area, buf->addr);
+ dma_free_wc(pcm->card->dev, buf->bytes, buf->area, buf->addr);
buf->area = NULL;
}
}