In ttm_dma_pool_get_pages(), the pooled page's kernel virtual address is now recorded alongside its bus address when the page is handed out:

        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                ttm->pages[index] = d_page->p;
+               ttm_dma->cpu_address[index] = d_page->vaddr;
                ttm_dma->dma_address[index] = d_page->dma;
                list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                r = 0;
        }
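The vaddr copied above comes from the pool itself: each pooled page is allocated with dma_alloc_coherent(), which returns a kernel virtual address together with the bus address, and the pool keeps both in its per-page bookkeeping. Roughly (the struct lives in ttm_page_alloc_dma.c; all four fields are the ones the hunk above dereferences):

        /* Per-page bookkeeping kept by the coherent DMA pool; the hunk
         * above copies vaddr and dma into the ttm_dma_tt arrays. */
        struct dma_page {
                struct list_head page_list;     /* entry in the pool's free_list */
                void *vaddr;                    /* CPU address from dma_alloc_coherent() */
                struct page *p;                 /* backing struct page */
                dma_addr_t dma;                 /* bus address for the device */
        };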
In ttm_dma_unpopulate(), the per-page reset loop clears the new array as well (cpu_address entries are pointers, hence NULL; dma_address entries are dma_addr_t values, hence 0):

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        for (i = 0; i < ttm->num_pages; i++) {
                ttm->pages[i] = NULL;
+               ttm_dma->cpu_address[i] = NULL;
                ttm_dma->dma_address[i] = 0;
        }
The page directory allocator is reworked to carve all three per-page arrays out of a single buffer:

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
-       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
-       ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
-                                           sizeof(*ttm->dma_address));
+       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+                                         sizeof(*ttm->ttm.pages) +
+                                         sizeof(*ttm->dma_address) +
+                                         sizeof(*ttm->cpu_address));
+       ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+       ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}
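Spelled out, the arithmetic packs three parallel arrays into one zeroed drm_calloc_large() buffer; a minimal sketch of the equivalent layout (the local names here are illustrative, not from the patch):

        /*
         * One allocation of num_pages * (sizeof(struct page *) +
         * sizeof(void *) + sizeof(dma_addr_t)) bytes, carved into
         * three parallel arrays laid out back to back:
         *
         *   [ pages[0..n-1] | cpu_address[0..n-1] | dma_address[0..n-1] ]
         */
        size_t n = ttm->ttm.num_pages;
        struct page **pages = drm_calloc_large(n, sizeof(*pages) +
                                                  sizeof(void *) +
                                                  sizeof(dma_addr_t));
        void **cpu_address = (void **)(pages + n);              /* right after pages[] */
        dma_addr_t *dma_address = (dma_addr_t *)(cpu_address + n);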
In ttm_dma_tt_init(), a single check now covers all three arrays, since one allocation backs them:

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        ttm_dma_tt_alloc_page_directory(ttm_dma);
-       if (!ttm->pages || !ttm_dma->dma_address) {
+       if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
And in ttm_dma_tt_fini():

        drm_free_large(ttm->pages);
        ttm->pages = NULL;
-       drm_free_large(ttm_dma->dma_address);
+       ttm_dma->cpu_address = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
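No drm_free_large() call replaces the removed one by design: cpu_address and dma_address now point into the buffer owned by ttm->pages, so the single drm_free_large(ttm->pages) above releases all three arrays and the remaining pointers only need to be cleared.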
Finally, the structure itself gains the new field, documented in its kerneldoc:

* struct ttm_dma_tt
*
* @ttm: Base ttm_tt struct.
+ * @cpu_address: The CPU addresses of the pages
* @dma_address: The DMA (bus) addresses of the pages
* @pages_list: used by some page allocation backend
*
*/
struct ttm_dma_tt {
        struct ttm_tt ttm;
+       void **cpu_address;
        dma_addr_t *dma_address;
        struct list_head pages_list;
};
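With the field in place, a backend that populated the TTM through the coherent DMA pool can reach page contents directly through the existing kernel mapping rather than kmap()ing ttm.pages[i]; a hypothetical consumer (fill_page() is illustrative, not part of the patch):

        /* Hypothetical helper, not from the patch: write into the i-th
         * backing page via the coherent CPU mapping. Caller guarantees
         * i < ttm_dma->ttm.num_pages and len <= PAGE_SIZE. */
        static void fill_page(struct ttm_dma_tt *ttm_dma, unsigned long i,
                              const void *src, size_t len)
        {
                memcpy(ttm_dma->cpu_address[i], src, len);
        }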