2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
5 * Data structure definition for Exynos IOMMU driver
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #ifndef _EXYNOS_IOMMU_H_
12 #define _EXYNOS_IOMMU_H_
14 #include <linux/kernel.h>
15 #include <linux/spinlock.h>
16 #include <linux/list.h>
17 #include <linux/device.h>
18 #include <linux/platform_device.h>
19 #include <linux/genalloc.h>
20 #include <linux/iommu.h>
21 #include <linux/irq.h>
22 #include <linux/clk.h>
24 #include <linux/exynos_iovmm.h>
28 #include "exynos-iommu-log.h"
30 #define TRACE_LOG(...) do { } while (0) /* trace_printk */
31 #define TRACE_LOG_DEV(dev, fmt, args...) \
32 TRACE_LOG("%s: " fmt, dev_name(dev), ##args)
34 #define MODULE_NAME "exynos-sysmmu"
36 #define IOVA_START 0x10000000
37 #define IOVM_SIZE (SZ_2G + SZ_1G + SZ_256M) /* last 4K is for error values */
/*
 * Number of IOVM pages covered by a VM area of @vmsize bytes.
 * The argument is fully parenthesized so callers may pass arbitrary
 * expressions (e.g. "base + extra") without precedence surprises.
 */
#define IOVM_NUM_PAGES(vmsize) ((vmsize) / PAGE_SIZE)
/*
 * Size in bytes of the allocation bitmap that tracks IOVM_NUM_PAGES
 * pages, one bit per page. NOTE(review): rounding adds BITS_PER_BYTE
 * before dividing, so up to one spare byte may be allocated; harmless,
 * but "+ BITS_PER_BYTE - 1" would be the exact ceiling — confirm no
 * caller relies on the extra byte before tightening.
 */
#define IOVM_BITMAP_SIZE(vmsize) \
		((IOVM_NUM_PAGES(vmsize) + BITS_PER_BYTE) / BITS_PER_BYTE)
43 #define SPSECT_ORDER 24
44 #define DSECT_ORDER 21
46 #define LPAGE_ORDER 16
47 #define SPAGE_ORDER 12
49 #define SPSECT_SIZE (1 << SPSECT_ORDER)
50 #define DSECT_SIZE (1 << DSECT_ORDER)
51 #define SECT_SIZE (1 << SECT_ORDER)
52 #define LPAGE_SIZE (1 << LPAGE_ORDER)
53 #define SPAGE_SIZE (1 << SPAGE_ORDER)
55 #define SPSECT_MASK ~(SPSECT_SIZE - 1)
56 #define DSECT_MASK ~(DSECT_SIZE - 1)
57 #define SECT_MASK ~(SECT_SIZE - 1)
58 #define LPAGE_MASK ~(LPAGE_SIZE - 1)
59 #define SPAGE_MASK ~(SPAGE_SIZE - 1)
61 #define SPSECT_ENT_MASK ~((SPSECT_SIZE >> PG_ENT_SHIFT) - 1)
62 #define DSECT_ENT_MASK ~((DSECT_SIZE >> PG_ENT_SHIFT) - 1)
63 #define SECT_ENT_MASK ~((SECT_SIZE >> PG_ENT_SHIFT) - 1)
64 #define LPAGE_ENT_MASK ~((LPAGE_SIZE >> PG_ENT_SHIFT) - 1)
65 #define SPAGE_ENT_MASK ~((SPAGE_SIZE >> PG_ENT_SHIFT) - 1)
67 #define SECT_PER_SPSECT (SPSECT_SIZE / SECT_SIZE)
68 #define SECT_PER_DSECT (DSECT_SIZE / SECT_SIZE)
69 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
71 #define PGBASE_TO_PHYS(pgent) (phys_addr_t)((pgent) << PG_ENT_SHIFT)
73 #define MAX_NUM_PBUF 6
74 #define MAX_NUM_PLANE 6
76 #define NUM_LV1ENTRIES 4096
77 #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
79 #define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
80 #define lv2ent_offset(iova) ((iova & ~SECT_MASK) >> SPAGE_ORDER)
82 typedef u32 sysmmu_pte_t
;
84 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
86 #define spsection_phys(sent) PGBASE_TO_PHYS(*(sent) & SPSECT_ENT_MASK)
87 #define spsection_offs(iova) ((iova) & (SPSECT_SIZE - 1))
88 #define section_phys(sent) PGBASE_TO_PHYS(*(sent) & SECT_ENT_MASK)
89 #define section_offs(iova) ((iova) & (SECT_SIZE - 1))
90 #define lpage_phys(pent) PGBASE_TO_PHYS(*(pent) & LPAGE_ENT_MASK)
91 #define lpage_offs(iova) ((iova) & (LPAGE_SIZE - 1))
92 #define spage_phys(pent) PGBASE_TO_PHYS(*(pent) & SPAGE_ENT_MASK)
93 #define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))
95 #define lv2table_base(sent) ((phys_addr_t)(*(sent) & ~0x3F) << PG_ENT_SHIFT)
97 #define SYSMMU_BLOCK_POLLING_COUNT 4096
99 #define REG_MMU_CTRL 0x000
100 #define REG_MMU_CFG 0x004
101 #define REG_MMU_STATUS 0x008
102 #define REG_MMU_VERSION 0x034
104 #define CTRL_ENABLE 0x5
105 #define CTRL_BLOCK 0x7
106 #define CTRL_DISABLE 0x0
107 #define CTRL_BLOCK_DISABLE 0x3
109 #define CFG_ACGEN (1 << 24) /* System MMU 3.3+ */
110 #define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ */
111 #define CFG_SHAREABLE (1 << 12) /* System MMU 3.0+ */
112 #define CFG_QOS_OVRRIDE (1 << 11) /* System MMU 3.3+ */
113 #define CFG_QOS(n) (((n) & 0xF) << 7)
116 * Metadata attached to the owner of a group of System MMUs that belong
117 * to the same owner device.
119 struct exynos_iommu_owner
{
120 struct list_head client
; /* entry of exynos_iommu_domain.clients */
122 struct exynos_iommu_owner
*next
; /* linked list of Owners */
123 void *vmm_data
; /* IO virtual memory manager's data */
124 spinlock_t lock
; /* Lock to preserve consistency of System MMU */
125 struct list_head mmu_list
; /* head of sysmmu_list_data.node */
126 #ifdef CONFIG_EXYNOS_IOMMU_V6
127 struct notifier_block nb
;
128 iommu_fault_handler_t fault_handler
;
133 struct exynos_vm_region
{
134 struct list_head node
;
141 struct exynos_iovmm
{
142 struct iommu_domain
*domain
; /* iommu domain for this iovmm */
143 size_t iovm_size
[MAX_NUM_PLANE
]; /* iovm bitmap size per plane */
144 u32 iova_start
[MAX_NUM_PLANE
]; /* iovm start address per plane */
145 unsigned long *vm_map
[MAX_NUM_PLANE
]; /* iovm bitmap per plane */
146 struct list_head regions_list
; /* list of exynos_vm_region */
147 spinlock_t vmlist_lock
; /* lock for updating regions_list */
148 spinlock_t bitmap_lock
; /* lock for manipulating bitmaps */
149 struct device
*dev
; /* peripheral device that has this iovmm */
150 size_t allocated_size
[MAX_NUM_PLANE
];
151 int num_areas
[MAX_NUM_PLANE
];
154 unsigned int num_map
;
155 unsigned int num_unmap
;
156 const char *domain_name
;
157 #ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
158 struct exynos_iommu_event_log log
;
162 void exynos_sysmmu_tlb_invalidate(struct iommu_domain
*domain
, dma_addr_t start
,
165 #define SYSMMU_FAULT_WRITE (1 << SYSMMU_FAULTS_NUM)
167 enum sysmmu_property
{
168 SYSMMU_PROP_RESERVED
,
171 SYSMMU_PROP_READWRITE
= SYSMMU_PROP_READ
| SYSMMU_PROP_WRITE
,
172 SYSMMU_PROP_RW_MASK
= SYSMMU_PROP_READWRITE
,
173 SYSMMU_PROP_NONBLOCK_TLBINV
= 0x10,
174 SYSMMU_PROP_STOP_BLOCK
= 0x20,
175 SYSMMU_PROP_DISABLE_ACG
= 0x40,
176 SYSMMU_PROP_WINDOW_SHIFT
= 16,
177 SYSMMU_PROP_WINDOW_MASK
= 0x1F << SYSMMU_PROP_WINDOW_SHIFT
,
180 enum sysmmu_clock_ids
{
188 * Metadata attached to each System MMU devices.
190 struct sysmmu_drvdata
{
191 struct list_head node
; /* entry of exynos_iommu_owner.mmu_list */
192 struct list_head pb_grp_list
; /* list of pb groups */
193 struct sysmmu_drvdata
*next
; /* linked list of System MMU */
194 struct device
*sysmmu
; /* System MMU's device descriptor */
195 struct device
*master
; /* Client device that needs System MMU */
196 void __iomem
*sfrbase
;
197 struct clk
*clocks
[SYSMMU_CLK_NUM
];
199 struct iommu_domain
*domain
; /* domain given to iommu_attach_device() */
202 struct sysmmu_prefbuf pbufs
[MAX_NUM_PBUF
];
207 enum sysmmu_property prop
; /* mach/sysmmu.h */
208 #ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
209 struct exynos_iommu_event_log log
;
211 #ifdef CONFIG_EXYNOS_IOMMU_V6
212 struct atomic_notifier_head fault_notifiers
;
214 unsigned char event_cnt
;
217 struct exynos_iommu_domain
{
218 struct list_head clients
; /* list of sysmmu_drvdata.node */
219 sysmmu_pte_t
*pgtable
; /* lv1 page table, 16KB */
220 atomic_t
*lv2entcnt
; /* free lv2 entry counter for each section */
221 spinlock_t lock
; /* lock for this structure */
222 spinlock_t pgtablelock
; /* lock for modifying page table @ pgtable */
223 #ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
224 struct exynos_iommu_event_log log
;
229 struct list_head node
;
233 int ar_axi_id
[MAX_NUM_PBUF
];
234 int aw_axi_id
[MAX_NUM_PBUF
];
235 struct device
*master
;
236 enum sysmmu_property prop
;
239 int sysmmu_set_ppc_event(struct sysmmu_drvdata
*drvdata
, int event
);
240 void dump_sysmmu_ppc_cnt(struct sysmmu_drvdata
*drvdata
);
241 extern const char *ppc_event_name
[];
243 #if defined(CONFIG_EXYNOS7_IOMMU) && defined(CONFIG_EXYNOS5_IOMMU)
244 #error "CONFIG_IOMMU_EXYNOS5 and CONFIG_IOMMU_EXYNOS7 defined together"
247 #if defined(CONFIG_EXYNOS5_IOMMU) /* System MMU v1/2/3 */
249 #define REG_PPC_PMNC 0x800
250 #define REG_PPC_CNTENS 0x810
251 #define REG_PPC_CNTENC 0x820
252 #define REG_PPC_INTENS 0x830
253 #define REG_PPC_INTENC 0x840
254 #define REG_PPC_FLAG 0x850
255 #define REG_PPC_CCNT 0x900
256 #define REG_PPC_PMCNT(x) (0x910 + 0x10 * (x))
257 #define REG_PPC_EVENT_SEL(offset, cnt) ((offset) + 0x4 * (cnt))
259 #define SYSMMU_OF_COMPAT_STRING "samsung,exynos4210-sysmmu"
260 #define DEFAULT_QOS_VALUE 8
261 #define PG_ENT_SHIFT 0 /* 32bit PA, 32bit VA */
262 #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
263 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
264 #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
265 ((*(sent) & 3) == 1))
266 #define lv1ent_section(sent) ((*(sent) & 3) == 2)
267 #define lv1ent_dsection(sent) 0 /* Large section is not defined */
268 #define lv1ent_spsection(sent) (((*(sent) & 3) == 2) && \
269 (((*(sent) >> 18) & 1) == 1))
270 #define lv2ent_fault(pent) ((*(pent) & 3) == 0 || \
271 ((*(pent) & SPAGE_ENT_MASK) == fault_page))
272 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
273 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
274 #define dsection_phys(sent) ({ BUG(); 0; }) /* large section is not defined */
275 #define dsection_offs(iova) ({ BUG(); 0; })
276 #define mk_lv1ent_spsect(pa) ((sysmmu_pte_t) ((pa) | 0x40002))
277 #define mk_lv1ent_dsect(pa) ({ BUG(); 0; })
278 #define mk_lv1ent_sect(pa) ((sysmmu_pte_t) ((pa) | 2))
279 #define mk_lv1ent_page(pa) ((sysmmu_pte_t) ((pa) | 1))
280 #define mk_lv2ent_lpage(pa) ((sysmmu_pte_t) ((pa) | 1))
281 #define mk_lv2ent_spage(pa) ((sysmmu_pte_t) ((pa) | 2))
282 #define set_lv1ent_shareable(sent) (*(sent) |= (1 << 16))
283 #define set_lv2ent_shareable(pent) (*(pent) |= (1 << 10))
285 #define PGSIZE_BITMAP (SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE)
287 #define __exynos_sysmmu_set_df(drvdata, iova) do { } while (0)
288 #define __exynos_sysmmu_release_df(drvdata) do { } while (0)
290 /* System MMU v5 ~ */
291 #elif defined(CONFIG_EXYNOS7_IOMMU) || defined(CONFIG_EXYNOS_IOMMU_V6)
293 #define REG_PPC_EVENT_SEL(x) (0x600 + 0x4 * (x))
294 #define REG_PPC_PMNC 0x620
295 #define REG_PPC_CNTENS 0x624
296 #define REG_PPC_CNTENC 0x628
297 #define REG_PPC_INTENS 0x62C
298 #define REG_PPC_INTENC 0x630
299 #define REG_PPC_FLAG 0x634
300 #define REG_PPC_CCNT 0x640
301 #define REG_PPC_PMCNT(x) (0x644 + 0x4 * (x))
303 #define SYSMMU_OF_COMPAT_STRING "samsung,exynos5430-sysmmu"
304 #define DEFAULT_QOS_VALUE -1 /* Inherited from master */
305 #define PG_ENT_SHIFT 4 /* 36bit PA, 32bit VA */
306 #ifndef CONFIG_EXYNOS_IOMMU_V6
307 #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
308 ((*(sent) & 7) == 0))
309 #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
310 ((*(sent) & 7) == 1))
312 #define lv1ent_fault(sent) ((*(sent) & 7) == 0)
313 #define lv1ent_page(sent) ((*(sent) & 7) == 1)
315 #define lv1ent_section(sent) ((*(sent) & 7) == 2)
316 #define lv1ent_dsection(sent) ((*(sent) & 7) == 4)
317 #define lv1ent_spsection(sent) ((*(sent) & 7) == 6)
318 #define lv2ent_fault(pent) ((*(pent) & 3) == 0 || \
319 (PGBASE_TO_PHYS(*(pent) & SPAGE_ENT_MASK) == fault_page))
320 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
321 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
322 #define dsection_phys(sent) PGBASE_TO_PHYS(*(sent) & DSECT_ENT_MASK)
323 #define dsection_offs(iova) ((iova) & (DSECT_SIZE - 1))
324 #define mk_lv1ent_spsect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 6)
325 #define mk_lv1ent_dsect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 4)
326 #define mk_lv1ent_sect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
327 #define mk_lv1ent_page(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
328 #define mk_lv2ent_lpage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
329 #define mk_lv2ent_spage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
330 #define set_lv1ent_shareable(sent) (*(sent) |= (1 << 6))
331 #define set_lv2ent_shareable(pent) (*(pent) |= (1 << 4))
333 #define PGSIZE_BITMAP (DSECT_SIZE | SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE)
335 void __sysmmu_show_status(struct sysmmu_drvdata
*drvdata
);
336 void __exynos_sysmmu_set_df(struct sysmmu_drvdata
*drvdata
, dma_addr_t iova
);
337 void __exynos_sysmmu_release_df(struct sysmmu_drvdata
*drvdata
);
340 #error "Neither CONFIG_IOMMU_EXYNOS5 nor CONFIG_IOMMU_EXYNOS7 is defined"
343 static inline void __sysmmu_clk_enable(struct sysmmu_drvdata
*data
)
345 if (!IS_ERR(data
->clocks
[SYSMMU_ACLK
]))
346 clk_enable(data
->clocks
[SYSMMU_ACLK
]);
348 if (!IS_ERR(data
->clocks
[SYSMMU_PCLK
]))
349 clk_enable(data
->clocks
[SYSMMU_PCLK
]);
352 static inline void __sysmmu_clk_disable(struct sysmmu_drvdata
*data
)
354 if (!IS_ERR(data
->clocks
[SYSMMU_ACLK
]))
355 clk_disable(data
->clocks
[SYSMMU_ACLK
]);
357 if (!IS_ERR(data
->clocks
[SYSMMU_PCLK
]))
358 clk_disable(data
->clocks
[SYSMMU_PCLK
]);
361 static inline void __master_clk_enable(struct sysmmu_drvdata
*data
)
363 if (!IS_ERR(data
->clocks
[SYSMMU_MASTER
]))
364 clk_enable(data
->clocks
[SYSMMU_MASTER
]);
367 static inline void __master_clk_disable(struct sysmmu_drvdata
*data
)
369 if (!IS_ERR(data
->clocks
[SYSMMU_MASTER
]))
370 clk_disable(data
->clocks
[SYSMMU_MASTER
]);
374 #if defined(CONFIG_EXYNOS7_IOMMU) && defined(CONFIG_EXYNOS5_IOMMU)
375 #error "CONFIG_IOMMU_EXYNOS5 and CONFIG_IOMMU_EXYNOS7 defined together"
378 static inline bool get_sysmmu_runtime_active(struct sysmmu_drvdata
*data
)
380 return ++data
->runtime_active
== 1;
383 static inline bool put_sysmmu_runtime_active(struct sysmmu_drvdata
*data
)
385 BUG_ON(data
->runtime_active
< 1);
386 return --data
->runtime_active
== 0;
/*
 * True while at least one get_sysmmu_runtime_active() call is
 * outstanding, i.e. the runtime-active refcount is positive.
 */
static inline bool is_sysmmu_runtime_active(struct sysmmu_drvdata *data)
{
	return data->runtime_active > 0;
}
394 static inline bool set_sysmmu_active(struct sysmmu_drvdata
*data
)
396 /* return true if the System MMU was not active previously
397 and it needs to be initialized */
398 return ++data
->activations
== 1;
401 static inline bool set_sysmmu_inactive(struct sysmmu_drvdata
*data
)
403 /* return true if the System MMU is needed to be disabled */
404 BUG_ON(data
->activations
< 1);
405 return --data
->activations
== 0;
/*
 * True while the activation refcount raised by set_sysmmu_active()
 * is positive, i.e. at least one enable request is outstanding.
 */
static inline bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
/*
 * True only when the System MMU is both activated (is_sysmmu_active())
 * and runtime-active — the combination under which the hardware is
 * expected to be enabled.
 */
static inline bool is_sysmmu_really_enabled(struct sysmmu_drvdata *data)
{
	return is_sysmmu_active(data) && data->runtime_active;
}
/*
 * REG_MMU_VERSION keeps an 11-bit raw version number in bits [31:21]
 * (extracted by MMU_RAW_VER). Within that raw value the major version
 * is the upper 4 bits and the minor version the lower 7 bits, as
 * MAKE_MMU_VER's masks show.
 */
#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
424 static inline unsigned int __raw_sysmmu_version(void __iomem
*sfrbase
)
426 return MMU_RAW_VER(__raw_readl(sfrbase
+ REG_MMU_VERSION
));
429 static inline void __raw_sysmmu_disable(void __iomem
*sfrbase
, int disable
)
431 __raw_writel(0, sfrbase
+ REG_MMU_CFG
);
432 __raw_writel(disable
, sfrbase
+ REG_MMU_CTRL
);
433 BUG_ON(__raw_readl(sfrbase
+ REG_MMU_CTRL
) != disable
);
/* Enable the System MMU by writing CTRL_ENABLE to the control register. */
static inline void __raw_sysmmu_enable(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
441 #define sysmmu_unblock __raw_sysmmu_enable
443 void dump_sysmmu_tlb_pb(void __iomem
*sfrbase
);
445 static inline bool sysmmu_block(void __iomem
*sfrbase
)
447 int i
= SYSMMU_BLOCK_POLLING_COUNT
;
449 __raw_writel(CTRL_BLOCK
, sfrbase
+ REG_MMU_CTRL
);
450 while ((i
> 0) && !(__raw_readl(sfrbase
+ REG_MMU_STATUS
) & 1))
453 if (!(__raw_readl(sfrbase
+ REG_MMU_STATUS
) & 1)) {
454 dump_sysmmu_tlb_pb(sfrbase
);
455 #if defined(CONFIG_EXYNOS5430_BTS)
456 exynos5_bts_show_mo_status();
458 panic("Failed to block System MMU!");
459 sysmmu_unblock(sfrbase
);
466 void __sysmmu_init_config(struct sysmmu_drvdata
*drvdata
);
467 void __sysmmu_set_ptbase(void __iomem
*sfrbase
, phys_addr_t pfn_pgtable
);
469 extern sysmmu_pte_t
*zero_lv2_table
;
470 #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
472 static inline sysmmu_pte_t
*page_entry(sysmmu_pte_t
*sent
, unsigned long iova
)
474 return (sysmmu_pte_t
*)(phys_to_virt(lv2table_base(sent
))) +
478 static inline sysmmu_pte_t
*section_entry(
479 sysmmu_pte_t
*pgtable
, unsigned long iova
)
481 return (sysmmu_pte_t
*)(pgtable
+ lv1ent_offset(iova
));
484 irqreturn_t
exynos_sysmmu_irq(int irq
, void *dev_id
);
485 void __sysmmu_tlb_invalidate_flpdcache(void __iomem
*sfrbase
, dma_addr_t iova
);
486 void __sysmmu_tlb_invalidate_entry(void __iomem
*sfrbase
, dma_addr_t iova
);
487 void __sysmmu_tlb_invalidate(struct sysmmu_drvdata
*drvdata
,
488 dma_addr_t iova
, size_t size
);
489 #if defined(CONFIG_EXYNOS_IOMMU)
490 void __exynos_sysmmu_set_prefbuf_by_plane(struct sysmmu_drvdata
*drvdata
,
491 unsigned int inplanes
, unsigned int onplanes
,
492 unsigned int ipoption
, unsigned int opoption
);
493 void __exynos_sysmmu_set_prefbuf_by_region(struct sysmmu_drvdata
*drvdata
,
494 struct sysmmu_prefbuf pb_reg
[],
495 unsigned int num_reg
);
496 int __prepare_prefetch_buffers_by_plane(struct sysmmu_drvdata
*drvdata
,
497 struct sysmmu_prefbuf prefbuf
[], int num_pb
,
498 int inplanes
, int onplanes
,
499 int ipoption
, int opoption
);
502 void dump_sysmmu_tlb_pb(void __iomem
*sfrbase
);
504 #if defined(CONFIG_EXYNOS_IOVMM) || defined(CONFIG_EXYNOS_IOVMM_V6)
505 static inline struct exynos_iovmm
*exynos_get_iovmm(struct device
*dev
)
507 if (!dev
->archdata
.iommu
) {
508 dev_err(dev
, "%s: System MMU is not configured\n", __func__
);
512 return ((struct exynos_iommu_owner
*)dev
->archdata
.iommu
)->vmm_data
;
515 struct exynos_vm_region
*find_iovm_region(struct exynos_iovmm
*vmm
,
518 static inline int find_iovmm_plane(struct exynos_iovmm
*vmm
, dma_addr_t iova
)
522 for (i
= 0; i
< (vmm
->inplanes
+ vmm
->onplanes
); i
++)
523 if ((iova
>= vmm
->iova_start
[i
]) &&
524 (iova
< (vmm
->iova_start
[i
] + vmm
->iovm_size
[i
])))
529 #if defined(CONFIG_EXYNOS_IOVMM_V6)
530 struct exynos_iovmm
*exynos_create_single_iovmm(const char *name
);
531 int exynos_sysmmu_add_fault_notifier(struct device
*dev
,
532 iommu_fault_handler_t handler
, void *token
);
535 static inline struct exynos_iovmm
*exynos_get_iovmm(struct device
*dev
)
540 struct exynos_vm_region
*find_iovm_region(struct exynos_iovmm
*vmm
,
546 static inline int find_iovmm_plane(struct exynos_iovmm
*vmm
, dma_addr_t iova
)
551 static inline struct exynos_iovmm
*exynos_create_single_iovmm(const char *name
)
555 #endif /* CONFIG_EXYNOS_IOVMM */
557 #endif /* _EXYNOS_IOMMU_H_ */