/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <dspbridge/host_os.h>
#include <plat/dmtimer.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/io_sm.h>
#include <dspbridge/dspdeh.h>

#include <dspbridge/dsp-mmu.h>
#define MMU_CNTL_TWL_EN		(1 << 2)

static struct tasklet_struct mmu_tasklet;
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
{
	void *dummy_addr;
	u32 fa, tmp;
	struct iotlb_entry e;
	struct iommu *mmu = dev_context->dsp_mmu;

	dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
	/*
	 * Before acking the MMU fault, let's make sure MMU can only
	 * access entry #0. Then add a new entry so that the DSP OS
	 * can continue in order to dump the stack.
	 */
	tmp = iommu_read_reg(mmu, MMU_CNTL);
	tmp &= ~MMU_CNTL_TWL_EN;
	iommu_write_reg(mmu, tmp, MMU_CNTL);
	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
	e.da = fa & PAGE_MASK;
	e.pa = virt_to_phys(dummy_addr);
	e.valid = 1;
	e.prsvd = 1;
	e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
	e.endian = MMU_RAM_ENDIAN_LITTLE;
	e.elsz = MMU_RAM_ELSZ_32;
	e.mixed = 0;
	load_iotlb_entry(mmu, &e);

	dsp_clk_enable(DSP_CLK_GPT8);

	/* Run GPT8 up to near-overflow so the DSP OS gets time to react */
	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
	/* Clear MMU interrupt */
	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
	dump_dsp_stack(dev_context);
	dsp_clk_disable(DSP_CLK_GPT8);

	iopgtable_clear_entry(mmu, fa);
	free_page((unsigned long)dummy_addr);
}
#endif
static void fault_tasklet(unsigned long data)
{
	struct iommu *mmu = (struct iommu *)data;
	struct bridge_dev_context *dev_ctx;
	struct deh_mgr *dm;
	u32 fa;

	dev_get_deh_mgr(dev_get_first(), &dm);
	dev_get_bridge_context(dev_get_first(), &dev_ctx);

	if (!dm || !dev_ctx)
		return;

	fa = iommu_read_reg(mmu, MMU_FAULT_AD);

#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
	print_dsp_trace_buffer(dev_ctx);
	dump_dl_modules(dev_ctx);
	mmu_fault_print_stack(dev_ctx);
#endif

	bridge_deh_notify(dm, DSP_MMUFAULT, fa);
}
/*
 *  ======== mmu_fault_isr ========
 *      ISR to be triggered by a DSP MMU fault interrupt.
 */
static int mmu_fault_callback(struct iommu *mmu)
{
	if (!mmu)
		return -EPERM;

	/* Mask further MMU interrupts; the tasklet does the heavy lifting */
	iommu_write_reg(mmu, 0, MMU_IRQENABLE);
	tasklet_schedule(&mmu_tasklet);

	return 0;
}
/**
 * dsp_mmu_init() - initialize dsp_mmu module and return a handle
 *
 * This function initializes the dsp mmu module and returns a struct iommu
 * handle to use for dsp maps.
 */
struct iommu *dsp_mmu_init(void)
{
	struct iommu *mmu;

	mmu = iommu_get("iva2");

	if (!IS_ERR(mmu)) {
		tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
		mmu->isr = mmu_fault_callback;
	}

	return mmu;
}
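
/*
 * Illustrative sketch, compiled out: a minimal bring-up/teardown sequence
 * for this module, assuming process context. example_mmu_attach() is a
 * hypothetical name, not part of the bridge API.
 */
#if 0
static int example_mmu_attach(void)
{
	struct iommu *mmu = dsp_mmu_init();

	/* dsp_mmu_init() propagates the ERR_PTR from iommu_get() */
	if (IS_ERR(mmu))
		return PTR_ERR(mmu);

	/* ... map user buffers with user_to_dsp_map() ... */

	dsp_mmu_exit(mmu);
	return 0;
}
#endif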
/**
 * dsp_mmu_exit() - destroy dsp mmu module
 * @mmu:	Pointer to iommu handle.
 *
 * This function destroys the dsp mmu module.
 */
void dsp_mmu_exit(struct iommu *mmu)
{
	if (mmu)
		iommu_put(mmu);
	tasklet_kill(&mmu_tasklet);
}
/**
 * user_va2_pa() - get physical address from userspace address.
 * @mm:		mm_struct pointer of the process.
 * @address:	Virtual user space address.
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				/* Balance the mapping taken by pte_offset_map() */
				pte_unmap(ptep);
				if (pte_present(pte))
					return pte & PAGE_MASK;
			}
		}
	}

	return 0;
}
/**
 * get_io_pages() - pin and get pages of io user's buffer.
 * @mm:		mm_struct pointer of the process.
 * @uva:	Virtual user space address.
 * @pages:	Number of pages to be pinned.
 * @usr_pgs:	struct page array pointer where the user pages will be stored
 */
static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
			struct page **usr_pgs)
{
	u32 pa;
	int i;
	struct page *pg;

	for (i = 0; i < pages; i++) {
		pa = user_va2_pa(mm, uva);

		if (!pfn_valid(__phys_to_pfn(pa)))
			break;

		pg = phys_to_page(pa);
		usr_pgs[i] = pg;
		get_page(pg);
		/* Advance to the next page of the buffer */
		uva += PAGE_SIZE;
	}

	return i;
}
/**
 * user_to_dsp_map() - maps user to dsp virtual address
 * @mmu:	Pointer to iommu handle.
 * @uva:	Virtual user space address.
 * @da:		DSP virtual address.
 * @size:	Buffer size to map.
 * @usr_pgs:	struct page array pointer where the user pages will be stored
 *
 * This function maps a user space buffer into DSP virtual address space.
 */
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
		    struct page **usr_pgs)
{
	int res, w = 0, i;
	unsigned pages;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct sg_table *sgt;
	struct scatterlist *sg;

	if (!size || !usr_pgs)
		return -EINVAL;

	pages = size / PG_SIZE4K;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uva);
	while (vma && (uva + size > vma->vm_end))
		vma = find_vma(mm, vma->vm_end + 1);

	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, uva, size);
		up_read(&mm->mmap_sem);
		return -EINVAL;
	}
	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		w = 1;

	if (vma->vm_flags & VM_IO)
		i = get_io_pages(mm, uva, pages, usr_pgs);
	else
		i = get_user_pages(current, mm, uva, pages, w, 1,
				   usr_pgs, NULL);
	up_read(&mm->mmap_sem);

	if (i < 0)
		return i;

	if (i < pages) {
		res = -EFAULT;
		goto err_pages;
	}
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		res = -ENOMEM;
		goto err_pages;
	}

	res = sg_alloc_table(sgt, pages, GFP_KERNEL);
	if (res < 0)
		goto err_sg;
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
	if (!IS_ERR_VALUE(da))
		return da;

	res = (int)da;
	sg_free_table(sgt);
err_sg:
	kfree(sgt);
	i = pages;
err_pages:
	while (i--)
		put_page(usr_pgs[i]);

	return res;
}
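
/*
 * Illustrative sketch, compiled out: how a caller might drive
 * user_to_dsp_map(). example_map_buffer() and its locals are hypothetical;
 * size is assumed to be a multiple of 4 KiB, since the page count is
 * computed by integer division. The page array must outlive the mapping so
 * the unmap path can release the same pages.
 */
#if 0
static u32 example_map_buffer(struct iommu *mmu, u32 uva, u32 da, u32 size,
			      struct page ***pg_list_out)
{
	unsigned pages = size / PG_SIZE4K;
	struct page **pg_list;
	u32 mapped;

	pg_list = kcalloc(pages, sizeof(*pg_list), GFP_KERNEL);
	if (!pg_list)
		return -ENOMEM;

	mapped = user_to_dsp_map(mmu, uva, da, size, pg_list);
	if (IS_ERR_VALUE(mapped)) {
		kfree(pg_list);
		return mapped;
	}

	*pg_list_out = pg_list;
	return mapped;
}
#endif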
/**
 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
 * @mmu:	Pointer to iommu handle.
 * @da:		DSP virtual address of the buffer to unmap.
 *
 * This function unmaps a user space buffer from its DSP virtual address
 * and releases the pinned pages.
 */
int user_to_dsp_unmap(struct iommu *mmu, u32 da)
{
	unsigned i;
	struct sg_table *sgt;
	struct scatterlist *sg;

	sgt = iommu_vunmap(mmu, da);
	if (!sgt)
		return -EFAULT;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		put_page(sg_page(sg));
	sg_free_table(sgt);
	kfree(sgt);

	return 0;
}
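
/*
 * Illustrative sketch, compiled out: every successful user_to_dsp_map()
 * must be balanced by a user_to_dsp_unmap() on the same da, after which
 * the caller-owned page array can be freed. example_unmap_buffer() is a
 * hypothetical name.
 */
#if 0
static int example_unmap_buffer(struct iommu *mmu, u32 da,
				struct page **pg_list)
{
	int err = user_to_dsp_unmap(mmu, da);

	if (err)
		pr_err("%s: nothing mapped at da 0x%x\n", __func__, da);

	kfree(pg_list);
	return err;
}
#endif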