/* drivers/iommu/exynos-iommu.h */
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Data structure definition for Exynos IOMMU driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11#ifndef _EXYNOS_IOMMU_H_
12#define _EXYNOS_IOMMU_H_
13
14#include <linux/kernel.h>
15#include <linux/spinlock.h>
16#include <linux/list.h>
17#include <linux/device.h>
18#include <linux/platform_device.h>
19#include <linux/genalloc.h>
20#include <linux/iommu.h>
21#include <linux/irq.h>
22#include <linux/clk.h>
23
24#include <linux/exynos_iovmm.h>
25
26#include <mach/bts.h>
27
28#include "exynos-iommu-log.h"
29
/* Trace hooks, compiled out by default (replace body with trace_printk to use) */
#define TRACE_LOG(...) do { } while (0) /* trace_printk */
#define TRACE_LOG_DEV(dev, fmt, args...) \
	TRACE_LOG("%s: " fmt, dev_name(dev), ##args)

#define MODULE_NAME "exynos-sysmmu"

/* Default IO virtual address window managed by the IOVMM */
#define IOVA_START 0x10000000
#define IOVM_SIZE (SZ_2G + SZ_1G + SZ_256M) /* last 4K is for error values */

/* Fix: parenthesize the macro argument so expressions may be passed */
#define IOVM_NUM_PAGES(vmsize) ((vmsize) / PAGE_SIZE)
#define IOVM_BITMAP_SIZE(vmsize) \
	((IOVM_NUM_PAGES(vmsize) + BITS_PER_BYTE) / BITS_PER_BYTE)

43#define SPSECT_ORDER 24
44#define DSECT_ORDER 21
45#define SECT_ORDER 20
46#define LPAGE_ORDER 16
47#define SPAGE_ORDER 12
48
49#define SPSECT_SIZE (1 << SPSECT_ORDER)
50#define DSECT_SIZE (1 << DSECT_ORDER)
51#define SECT_SIZE (1 << SECT_ORDER)
52#define LPAGE_SIZE (1 << LPAGE_ORDER)
53#define SPAGE_SIZE (1 << SPAGE_ORDER)
54
55#define SPSECT_MASK ~(SPSECT_SIZE - 1)
56#define DSECT_MASK ~(DSECT_SIZE - 1)
57#define SECT_MASK ~(SECT_SIZE - 1)
58#define LPAGE_MASK ~(LPAGE_SIZE - 1)
59#define SPAGE_MASK ~(SPAGE_SIZE - 1)
60
61#define SPSECT_ENT_MASK ~((SPSECT_SIZE >> PG_ENT_SHIFT) - 1)
62#define DSECT_ENT_MASK ~((DSECT_SIZE >> PG_ENT_SHIFT) - 1)
63#define SECT_ENT_MASK ~((SECT_SIZE >> PG_ENT_SHIFT) - 1)
64#define LPAGE_ENT_MASK ~((LPAGE_SIZE >> PG_ENT_SHIFT) - 1)
65#define SPAGE_ENT_MASK ~((SPAGE_SIZE >> PG_ENT_SHIFT) - 1)
66
67#define SECT_PER_SPSECT (SPSECT_SIZE / SECT_SIZE)
68#define SECT_PER_DSECT (DSECT_SIZE / SECT_SIZE)
69#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
70
71#define PGBASE_TO_PHYS(pgent) (phys_addr_t)((pgent) << PG_ENT_SHIFT)
72
73#define MAX_NUM_PBUF 6
74#define MAX_NUM_PLANE 6
75
76#define NUM_LV1ENTRIES 4096
77#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
78
79#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
80#define lv2ent_offset(iova) ((iova & ~SECT_MASK) >> SPAGE_ORDER)
81
82typedef u32 sysmmu_pte_t;
83
84#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
85
86#define spsection_phys(sent) PGBASE_TO_PHYS(*(sent) & SPSECT_ENT_MASK)
87#define spsection_offs(iova) ((iova) & (SPSECT_SIZE - 1))
88#define section_phys(sent) PGBASE_TO_PHYS(*(sent) & SECT_ENT_MASK)
89#define section_offs(iova) ((iova) & (SECT_SIZE - 1))
90#define lpage_phys(pent) PGBASE_TO_PHYS(*(pent) & LPAGE_ENT_MASK)
91#define lpage_offs(iova) ((iova) & (LPAGE_SIZE - 1))
92#define spage_phys(pent) PGBASE_TO_PHYS(*(pent) & SPAGE_ENT_MASK)
93#define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))
94
95#define lv2table_base(sent) ((phys_addr_t)(*(sent) & ~0x3F) << PG_ENT_SHIFT)
96
97#define SYSMMU_BLOCK_POLLING_COUNT 4096
98
/* Common SFR offsets */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

/* Values written to REG_MMU_CTRL */
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
#define CTRL_BLOCK_DISABLE 0x3

/* Bits of REG_MMU_CFG; availability depends on the System MMU version */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3+ */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ */
#define CFG_SHAREABLE (1 << 12) /* System MMU 3.0+ */
#define CFG_QOS_OVRRIDE (1 << 11) /* (sic) System MMU 3.3+ */
#define CFG_QOS(n) (((n) & 0xF) << 7)

115/*
116 * Metadata attached to the owner of a group of System MMUs that belong
117 * to the same owner device.
118 */
119struct exynos_iommu_owner {
120 struct list_head client; /* entry of exynos_iommu_domain.clients */
121 struct device *dev;
122 struct exynos_iommu_owner *next; /* linked list of Owners */
123 void *vmm_data; /* IO virtual memory manager's data */
124 spinlock_t lock; /* Lock to preserve consistency of System MMU */
125 struct list_head mmu_list; /* head of sysmmu_list_data.node */
126#ifdef CONFIG_EXYNOS_IOMMU_V6
127 struct notifier_block nb;
128 iommu_fault_handler_t fault_handler;
129 void *token;
130#endif
131};
132
133struct exynos_vm_region {
134 struct list_head node;
135 u32 start;
136 u32 size;
137 u32 section_off;
138 u32 dummy_size;
139};
140
141struct exynos_iovmm {
142 struct iommu_domain *domain; /* iommu domain for this iovmm */
143 size_t iovm_size[MAX_NUM_PLANE]; /* iovm bitmap size per plane */
144 u32 iova_start[MAX_NUM_PLANE]; /* iovm start address per plane */
145 unsigned long *vm_map[MAX_NUM_PLANE]; /* iovm biatmap per plane */
146 struct list_head regions_list; /* list of exynos_vm_region */
147 spinlock_t vmlist_lock; /* lock for updating regions_list */
148 spinlock_t bitmap_lock; /* lock for manipulating bitmaps */
149 struct device *dev; /* peripheral device that has this iovmm */
150 size_t allocated_size[MAX_NUM_PLANE];
151 int num_areas[MAX_NUM_PLANE];
152 int inplanes;
153 int onplanes;
154 unsigned int num_map;
155 unsigned int num_unmap;
156 const char *domain_name;
157#ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
158 struct exynos_iommu_event_log log;
159#endif
160};
161
162void exynos_sysmmu_tlb_invalidate(struct iommu_domain *domain, dma_addr_t start,
163 size_t size);
164
165#define SYSMMU_FAULT_WRITE (1 << SYSMMU_FAULTS_NUM)
166
/* Behavior flags attached to a System MMU instance (see mach/sysmmu.h) */
enum sysmmu_property {
	SYSMMU_PROP_RESERVED,
	SYSMMU_PROP_READ,
	SYSMMU_PROP_WRITE,
	SYSMMU_PROP_READWRITE = SYSMMU_PROP_READ | SYSMMU_PROP_WRITE,
	SYSMMU_PROP_RW_MASK = SYSMMU_PROP_READWRITE,
	SYSMMU_PROP_NONBLOCK_TLBINV = 0x10,
	SYSMMU_PROP_STOP_BLOCK = 0x20,
	SYSMMU_PROP_DISABLE_ACG = 0x40,
	SYSMMU_PROP_WINDOW_SHIFT = 16,
	SYSMMU_PROP_WINDOW_MASK = 0x1F << SYSMMU_PROP_WINDOW_SHIFT,
};

/* Indices into sysmmu_drvdata.clocks[] */
enum sysmmu_clock_ids {
	SYSMMU_ACLK, /* System MMU bus clock */
	SYSMMU_PCLK, /* System MMU peripheral clock */
	SYSMMU_MASTER, /* clock of the master device */
	SYSMMU_CLK_NUM,
};

187/*
188 * Metadata attached to each System MMU devices.
189 */
190struct sysmmu_drvdata {
191 struct list_head node; /* entry of exynos_iommu_owner.mmu_list */
192 struct list_head pb_grp_list; /* list of pb groups */
193 struct sysmmu_drvdata *next; /* linked list of System MMU */
194 struct device *sysmmu; /* System MMU's device descriptor */
195 struct device *master; /* Client device that needs System MMU */
196 void __iomem *sfrbase;
197 struct clk *clocks[SYSMMU_CLK_NUM];
198 int activations;
199 struct iommu_domain *domain; /* domain given to iommu_attach_device() */
200 phys_addr_t pgtable;
201 spinlock_t lock;
202 struct sysmmu_prefbuf pbufs[MAX_NUM_PBUF];
203 short qos;
204 short num_pbufs;
205 int runtime_active;
206 bool suspended;
207 enum sysmmu_property prop; /* mach/sysmmu.h */
208#ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
209 struct exynos_iommu_event_log log;
210#endif
211#ifdef CONFIG_EXYNOS_IOMMU_V6
212 struct atomic_notifier_head fault_notifiers;
213#endif
214 unsigned char event_cnt;
215};
216
217struct exynos_iommu_domain {
218 struct list_head clients; /* list of sysmmu_drvdata.node */
219 sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
220 atomic_t *lv2entcnt; /* free lv2 entry counter for each section */
221 spinlock_t lock; /* lock for this structure */
222 spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
223#ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
224 struct exynos_iommu_event_log log;
225#endif
226};
227
228struct pb_info {
229 struct list_head node;
230 int ar_id_num;
231 int aw_id_num;
232 int grp_num;
233 int ar_axi_id[MAX_NUM_PBUF];
234 int aw_axi_id[MAX_NUM_PBUF];
235 struct device *master;
236 enum sysmmu_property prop;
237};
238
/* PPC (performance counter) helpers implemented by the driver core */
int sysmmu_set_ppc_event(struct sysmmu_drvdata *drvdata, int event);
void dump_sysmmu_ppc_cnt(struct sysmmu_drvdata *drvdata);
extern const char *ppc_event_name[];

243#if defined(CONFIG_EXYNOS7_IOMMU) && defined(CONFIG_EXYNOS5_IOMMU)
244#error "CONFIG_IOMMU_EXYNOS5 and CONFIG_IOMMU_EXYNOS7 defined together"
245#endif
246
247#if defined(CONFIG_EXYNOS5_IOMMU) /* System MMU v1/2/3 */
248
249#define REG_PPC_PMNC 0x800
250#define REG_PPC_CNTENS 0x810
251#define REG_PPC_CNTENC 0x820
252#define REG_PPC_INTENS 0x830
253#define REG_PPC_INTENC 0x840
254#define REG_PPC_FLAG 0x850
255#define REG_PPC_CCNT 0x900
256#define REG_PPC_PMCNT(x) (0x910 + 0x10 * (x))
257#define REG_PPC_EVENT_SEL(offset, cnt) ((offset) + 0x4 * (cnt))
258
259#define SYSMMU_OF_COMPAT_STRING "samsung,exynos4210-sysmmu"
260#define DEFAULT_QOS_VALUE 8
261#define PG_ENT_SHIFT 0 /* 32bit PA, 32bit VA */
262#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
263 ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
264#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
265 ((*(sent) & 3) == 1))
266#define lv1ent_section(sent) ((*(sent) & 3) == 2)
267#define lv1ent_dsection(sent) 0 /* Large section is not defined */
268#define lv1ent_spsection(sent) (((*(sent) & 3) == 2) && \
269 (((*(sent) >> 18) & 1) == 1))
270#define lv2ent_fault(pent) ((*(pent) & 3) == 0 || \
271 ((*(pent) & SPAGE_ENT_MASK) == fault_page))
272#define lv2ent_small(pent) ((*(pent) & 2) == 2)
273#define lv2ent_large(pent) ((*(pent) & 3) == 1)
274#define dsection_phys(sent) ({ BUG(); 0; }) /* large section is not defined */
275#define dsection_offs(iova) ({ BUG(); 0; })
276#define mk_lv1ent_spsect(pa) ((sysmmu_pte_t) ((pa) | 0x40002))
277#define mk_lv1ent_dsect(pa) ({ BUG(); 0; })
278#define mk_lv1ent_sect(pa) ((sysmmu_pte_t) ((pa) | 2))
279#define mk_lv1ent_page(pa) ((sysmmu_pte_t) ((pa) | 1))
280#define mk_lv2ent_lpage(pa) ((sysmmu_pte_t) ((pa) | 1))
281#define mk_lv2ent_spage(pa) ((sysmmu_pte_t) ((pa) | 2))
282#define set_lv1ent_shareable(sent) (*(sent) |= (1 << 16))
283#define set_lv2ent_shareable(pent) (*(pent) |= (1 << 10))
284
285#define PGSIZE_BITMAP (SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE)
286
287#define __exynos_sysmmu_set_df(drvdata, iova) do { } while (0)
288#define __exynos_sysmmu_release_df(drvdata) do { } while (0)
289
290 /* System MMU v5 ~ */
291#elif defined(CONFIG_EXYNOS7_IOMMU) || defined(CONFIG_EXYNOS_IOMMU_V6)
292
293#define REG_PPC_EVENT_SEL(x) (0x600 + 0x4 * (x))
294#define REG_PPC_PMNC 0x620
295#define REG_PPC_CNTENS 0x624
296#define REG_PPC_CNTENC 0x628
297#define REG_PPC_INTENS 0x62C
298#define REG_PPC_INTENC 0x630
299#define REG_PPC_FLAG 0x634
300#define REG_PPC_CCNT 0x640
301#define REG_PPC_PMCNT(x) (0x644 + 0x4 * (x))
302
303#define SYSMMU_OF_COMPAT_STRING "samsung,exynos5430-sysmmu"
304#define DEFAULT_QOS_VALUE -1 /* Inherited from master */
305#define PG_ENT_SHIFT 4 /* 36bit PA, 32bit VA */
306#ifndef CONFIG_EXYNOS_IOMMU_V6
307#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
308 ((*(sent) & 7) == 0))
309#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
310 ((*(sent) & 7) == 1))
311#else
312#define lv1ent_fault(sent) ((*(sent) & 7) == 0)
313#define lv1ent_page(sent) ((*(sent) & 7) == 1)
314#endif
315#define lv1ent_section(sent) ((*(sent) & 7) == 2)
316#define lv1ent_dsection(sent) ((*(sent) & 7) == 4)
317#define lv1ent_spsection(sent) ((*(sent) & 7) == 6)
318#define lv2ent_fault(pent) ((*(pent) & 3) == 0 || \
319 (PGBASE_TO_PHYS(*(pent) & SPAGE_ENT_MASK) == fault_page))
320#define lv2ent_small(pent) ((*(pent) & 2) == 2)
321#define lv2ent_large(pent) ((*(pent) & 3) == 1)
322#define dsection_phys(sent) PGBASE_TO_PHYS(*(sent) & DSECT_ENT_MASK)
323#define dsection_offs(iova) ((iova) & (DSECT_SIZE - 1))
324#define mk_lv1ent_spsect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 6)
325#define mk_lv1ent_dsect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 4)
326#define mk_lv1ent_sect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
327#define mk_lv1ent_page(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
328#define mk_lv2ent_lpage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
329#define mk_lv2ent_spage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
330#define set_lv1ent_shareable(sent) (*(sent) |= (1 << 6))
331#define set_lv2ent_shareable(pent) (*(pent) |= (1 << 4))
332
333#define PGSIZE_BITMAP (DSECT_SIZE | SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE)
334
335void __sysmmu_show_status(struct sysmmu_drvdata *drvdata);
336void __exynos_sysmmu_set_df(struct sysmmu_drvdata *drvdata, dma_addr_t iova);
337void __exynos_sysmmu_release_df(struct sysmmu_drvdata *drvdata);
338
339#else
340#error "Neither CONFIG_IOMMU_EXYNOS5 nor CONFIG_IOMMU_EXYNOS7 is defined"
341#endif
342
343static inline void __sysmmu_clk_enable(struct sysmmu_drvdata *data)
344{
345 if (!IS_ERR(data->clocks[SYSMMU_ACLK]))
346 clk_enable(data->clocks[SYSMMU_ACLK]);
347
348 if (!IS_ERR(data->clocks[SYSMMU_PCLK]))
349 clk_enable(data->clocks[SYSMMU_PCLK]);
350}
351
352static inline void __sysmmu_clk_disable(struct sysmmu_drvdata *data)
353{
354 if (!IS_ERR(data->clocks[SYSMMU_ACLK]))
355 clk_disable(data->clocks[SYSMMU_ACLK]);
356
357 if (!IS_ERR(data->clocks[SYSMMU_PCLK]))
358 clk_disable(data->clocks[SYSMMU_PCLK]);
359}
360
361static inline void __master_clk_enable(struct sysmmu_drvdata *data)
362{
363 if (!IS_ERR(data->clocks[SYSMMU_MASTER]))
364 clk_enable(data->clocks[SYSMMU_MASTER]);
365}
366
367static inline void __master_clk_disable(struct sysmmu_drvdata *data)
368{
369 if (!IS_ERR(data->clocks[SYSMMU_MASTER]))
370 clk_disable(data->clocks[SYSMMU_MASTER]);
371}
372

/* NOTE(review): redundant with the identical check earlier in this header;
 * kept as a safety net.  Message fixed to use the real config names. */
#if defined(CONFIG_EXYNOS7_IOMMU) && defined(CONFIG_EXYNOS5_IOMMU)
#error "CONFIG_EXYNOS5_IOMMU and CONFIG_EXYNOS7_IOMMU defined together"
#endif

378static inline bool get_sysmmu_runtime_active(struct sysmmu_drvdata *data)
379{
380 return ++data->runtime_active == 1;
381}
382
383static inline bool put_sysmmu_runtime_active(struct sysmmu_drvdata *data)
384{
385 BUG_ON(data->runtime_active < 1);
386 return --data->runtime_active == 0;
387}
388
389static inline bool is_sysmmu_runtime_active(struct sysmmu_drvdata *data)
390{
391 return data->runtime_active > 0;
392}
393
394static inline bool set_sysmmu_active(struct sysmmu_drvdata *data)
395{
396 /* return true if the System MMU was not active previously
397 and it needs to be initialized */
398 return ++data->activations == 1;
399}
400
401static inline bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
402{
403 /* return true if the System MMU is needed to be disabled */
404 BUG_ON(data->activations < 1);
405 return --data->activations == 0;
406}
407
408static inline bool is_sysmmu_active(struct sysmmu_drvdata *data)
409{
410 return data->activations > 0;
411}
412
413static inline bool is_sysmmu_really_enabled(struct sysmmu_drvdata *data)
414{
415 return is_sysmmu_active(data) && data->runtime_active;
416}
417
/* Version encoding: 4-bit major in bits [10:7], 7-bit minor in bits [6:0];
 * the raw hardware register keeps the version in bits [31:21]. */
#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

424static inline unsigned int __raw_sysmmu_version(void __iomem *sfrbase)
425{
426 return MMU_RAW_VER(__raw_readl(sfrbase + REG_MMU_VERSION));
427}
428
429static inline void __raw_sysmmu_disable(void __iomem *sfrbase, int disable)
430{
431 __raw_writel(0, sfrbase + REG_MMU_CFG);
432 __raw_writel(disable, sfrbase + REG_MMU_CTRL);
433 BUG_ON(__raw_readl(sfrbase + REG_MMU_CTRL) != disable);
434}
435
436static inline void __raw_sysmmu_enable(void __iomem *sfrbase)
437{
438 __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
439}
440
441#define sysmmu_unblock __raw_sysmmu_enable
442
443void dump_sysmmu_tlb_pb(void __iomem *sfrbase);
444
445static inline bool sysmmu_block(void __iomem *sfrbase)
446{
447 int i = SYSMMU_BLOCK_POLLING_COUNT;
448
449 __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
450 while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
451 --i;
452
453 if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
454 dump_sysmmu_tlb_pb(sfrbase);
455#if defined(CONFIG_EXYNOS5430_BTS)
456 exynos5_bts_show_mo_status();
457#endif
458 panic("Failed to block System MMU!");
459 sysmmu_unblock(sfrbase);
460 return false;
461 }
462
463 return true;
464}
465
466void __sysmmu_init_config(struct sysmmu_drvdata *drvdata);
467void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pfn_pgtable);
468
469extern sysmmu_pte_t *zero_lv2_table;
470#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
471
472static inline sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, unsigned long iova)
473{
474 return (sysmmu_pte_t *)(phys_to_virt(lv2table_base(sent))) +
475 lv2ent_offset(iova);
476}
477
478static inline sysmmu_pte_t *section_entry(
479 sysmmu_pte_t *pgtable, unsigned long iova)
480{
481 return (sysmmu_pte_t *)(pgtable + lv1ent_offset(iova));
482}
483
484irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id);
485void __sysmmu_tlb_invalidate_flpdcache(void __iomem *sfrbase, dma_addr_t iova);
486void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, dma_addr_t iova);
487void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *drvdata,
488 dma_addr_t iova, size_t size);
489#if defined(CONFIG_EXYNOS_IOMMU)
490void __exynos_sysmmu_set_prefbuf_by_plane(struct sysmmu_drvdata *drvdata,
491 unsigned int inplanes, unsigned int onplanes,
492 unsigned int ipoption, unsigned int opoption);
493void __exynos_sysmmu_set_prefbuf_by_region(struct sysmmu_drvdata *drvdata,
494 struct sysmmu_prefbuf pb_reg[],
495 unsigned int num_reg);
496int __prepare_prefetch_buffers_by_plane(struct sysmmu_drvdata *drvdata,
497 struct sysmmu_prefbuf prefbuf[], int num_pb,
498 int inplanes, int onplanes,
499 int ipoption, int opoption);
500#endif
501
502void dump_sysmmu_tlb_pb(void __iomem *sfrbase);
503
504#if defined(CONFIG_EXYNOS_IOVMM) || defined(CONFIG_EXYNOS_IOVMM_V6)
505static inline struct exynos_iovmm *exynos_get_iovmm(struct device *dev)
506{
507 if (!dev->archdata.iommu) {
508 dev_err(dev, "%s: System MMU is not configured\n", __func__);
509 return NULL;
510 }
511
512 return ((struct exynos_iommu_owner *)dev->archdata.iommu)->vmm_data;
513}
514
515struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
516 dma_addr_t iova);
517
518static inline int find_iovmm_plane(struct exynos_iovmm *vmm, dma_addr_t iova)
519{
520 int i;
521
522 for (i = 0; i < (vmm->inplanes + vmm->onplanes); i++)
523 if ((iova >= vmm->iova_start[i]) &&
524 (iova < (vmm->iova_start[i] + vmm->iovm_size[i])))
525 return i;
526 return -1;
527}
528
529#if defined(CONFIG_EXYNOS_IOVMM_V6)
530struct exynos_iovmm *exynos_create_single_iovmm(const char *name);
531int exynos_sysmmu_add_fault_notifier(struct device *dev,
532 iommu_fault_handler_t handler, void *token);
533#endif
534#else
535static inline struct exynos_iovmm *exynos_get_iovmm(struct device *dev)
536{
537 return NULL;
538}
539
540struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
541 dma_addr_t iova)
542{
543 return NULL;
544}
545
546static inline int find_iovmm_plane(struct exynos_iovmm *vmm, dma_addr_t iova)
547{
548 return -1;
549}
550
551static inline struct exynos_iovmm *exynos_create_single_iovmm(const char *name)
552{
553 return NULL;
554}
555#endif /* CONFIG_EXYNOS_IOVMM */
556
557#endif /* _EXYNOS_IOMMU_H_ */