#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/clk-private.h>
#include <linux/pm_domain.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#include <dt-bindings/sysmmu/sysmmu.h>

#include "exynos-iommu.h"
#define CFG_MASK	0x01101FBC	/* Selects bits 24, 20, 12-7 and 5-2 */

#define PB_INFO_NUM(reg)	((reg) & 0xFF)
#define PB_GRP_NUM(reg)		((reg) >> 20)
#define L1TLB_ATTR_IM		(1 << 16)
#define REG_PT_BASE_PPN		0x00C
#define REG_MMU_FLUSH		0x010
#define REG_MMU_FLUSH_ENTRY	0x014
#define REG_MMU_FLUSH_RANGE	0x018
#define REG_FLUSH_RANGE_START	0x020
#define REG_FLUSH_RANGE_END	0x024
#define REG_MMU_CAPA		0x030
#define REG_MMU_CAPA_1		0x038
#define REG_INT_STATUS		0x060
#define REG_INT_CLEAR		0x064
#define REG_FAULT_AR_ADDR	0x070
#define REG_FAULT_AR_TRANS_INFO	0x078
#define REG_FAULT_AW_ADDR	0x080
#define REG_FAULT_AW_TRANS_INFO	0x088
#define REG_L1TLB_CFG		0x100	/* sysmmu v5.1 only */
#define REG_L1TLB_CTRL		0x108	/* sysmmu v5.1 only */
#define REG_L2TLB_CFG		0x200	/* sysmmu with L2TLB only */
#define REG_PB_LMM		0x300
#define REG_PB_GRP_STATE	0x304
#define REG_PB_INDICATE		0x308
#define REG_PB_CFG		0x310
#define REG_PB_START_ADDR	0x320
#define REG_PB_END_ADDR		0x328
#define REG_PB_AXI_ID		0x330
#define REG_PB_INFO		0x350
#define REG_SW_DF_VPN		0x400	/* sysmmu v5.1 only */
#define REG_SW_DF_VPN_CMD_NUM	0x408	/* sysmmu v5.1 only */
#define REG_L1TLB_READ_ENTRY	0x750
#define REG_L1TLB_ENTRY_VPN	0x754
#define REG_L1TLB_ENTRY_PPN	0x75C
#define REG_L1TLB_ENTRY_ATTR	0x764
#define REG_L2TLB_READ_ENTRY	0x770
#define REG_L2TLB_ENTRY_VPN	0x774
#define REG_L2TLB_ENTRY_PPN	0x77C
#define REG_L2TLB_ENTRY_ATTR	0x784
#define REG_PCI_SPB0_SVPN	0x7A0
#define REG_PCI_SPB0_EVPN	0x7A4
#define REG_PCI_SPB0_SLOT_VALID	0x7A8
#define REG_PCI_SPB1_SVPN	0x7B0
#define REG_PCI_SPB1_EVPN	0x7B4
#define REG_PCI_SPB1_SLOT_VALID	0x7B8
/* 'reg' argument must be the value of the REG_MMU_CAPA register */
#define MMU_NUM_L1TLB_ENTRIES(reg)	((reg) & 0xFF)
#define MMU_HAVE_PB(reg)		(!!(((reg) >> 20) & 0xF))
#define MMU_PB_GRP_NUM(reg)		(((reg) >> 20) & 0xF)
#define MMU_HAVE_L2TLB(reg)		(!!(((reg) >> 8) & 0xF))

#define MMU_MAX_DF_CMD		8

#define SYSMMU_FAULTS_NUM	(SYSMMU_FAULT_UNKNOWN + 1)
const char *ppc_event_name[] = {
	/* ... */
	"BLOCK NUM BY PREFETCHING",
	"BLOCK CYCLE BY PREFETCHING",
	/* ... */
	"FLPD MISS ON PREFETCHING",
	/* ... */
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	/* ... */
	"L1TLB MULTI-HIT FAULT",
	/* ... */
};

static char *sysmmu_clock_names[SYSMMU_CLK_NUM] = {"aclk", "pclk", "master"};

static const char * const sysmmu_prop_opts[] = {
	[SYSMMU_PROP_RESERVED]		= "Reserved",
	[SYSMMU_PROP_READ]		= "r",
	[SYSMMU_PROP_WRITE]		= "w",
	[SYSMMU_PROP_READWRITE]		= "rw",	/* default */
};
static int iova_from_sent(sysmmu_pte_t *base, sysmmu_pte_t *sent)
{
	return ((unsigned long)sent - (unsigned long)base) *
				(SECT_SIZE / sizeof(sysmmu_pte_t));
}
struct sysmmu_list_data {
	struct device *sysmmu;
	struct list_head node;	/* entry of exynos_iommu_owner.mmu_list */
};

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
#define for_each_sysmmu_list(dev, sysmmu_list)				\
	list_for_each_entry(sysmmu_list,				\
		&((struct exynos_iommu_owner *)dev->archdata.iommu)->mmu_list,\
		node)
static struct exynos_iommu_owner *sysmmu_owner_list;
static struct sysmmu_drvdata *sysmmu_drvdata_list;

static struct kmem_cache *lv2table_kmem_cache;
static phys_addr_t fault_page;
static struct dentry *exynos_sysmmu_debugfs_root;
#ifdef CONFIG_ARM
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
			  virt_to_phys(vaend));
}
#else
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dma_sync_single_for_device(NULL,
				   virt_to_phys(vastart),
				   (size_t)(virt_to_phys(vaend) -
					    virt_to_phys(vastart)),
				   DMA_TO_DEVICE);
}
#endif
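
/*
 * Prefetch buffer (PB) capability is discovered at runtime: the PB group
 * field of REG_MMU_CAPA is non-zero only on System MMU instances that
 * implement prefetch buffers, so every PB path below probes it first.
 */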
static bool has_sysmmu_capable_pbuf(void __iomem *sfrbase)
{
	unsigned long cfg = __raw_readl(sfrbase + REG_MMU_CAPA);

	return MMU_HAVE_PB(cfg);
}
void __sysmmu_tlb_invalidate_flpdcache(void __iomem *sfrbase, dma_addr_t iova)
{
	if (has_sysmmu_capable_pbuf(sfrbase))
		writel(iova | 0x1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, dma_addr_t iova)
{
	writel(iova | 0x1, sfrbase + REG_MMU_FLUSH_ENTRY);
}

static void __sysmmu_tlb_invalidate_all(void __iomem *sfrbase)
{
	writel(0x1, sfrbase + REG_MMU_FLUSH);
}
void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *drvdata,
			     dma_addr_t iova, size_t size)
{
	void __iomem *sfrbase = drvdata->sfrbase;

	if (__raw_sysmmu_version(sfrbase) >= MAKE_MMU_VER(5, 1)) {
		__raw_writel(iova, sfrbase + REG_FLUSH_RANGE_START);
		__raw_writel(size - 1 + iova, sfrbase + REG_FLUSH_RANGE_END);
		writel(0x1, sfrbase + REG_MMU_FLUSH_RANGE);
		SYSMMU_EVENT_LOG_TLB_INV_RANGE(SYSMMU_DRVDATA_TO_LOG(drvdata),
					       iova, iova + size);
	} else {
		if (sysmmu_block(sfrbase)) {
			__sysmmu_tlb_invalidate_all(sfrbase);
			SYSMMU_EVENT_LOG_TLB_INV_ALL(
					SYSMMU_DRVDATA_TO_LOG(drvdata));
		}
		sysmmu_unblock(sfrbase);
	}
}
void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pfn_pgtable)
{
	__raw_writel(pfn_pgtable, sfrbase + REG_PT_BASE_PPN);

	__sysmmu_tlb_invalidate_all(sfrbase);
}
static void __sysmmu_disable_pbuf(struct sysmmu_drvdata *drvdata,
				  int target_grp)
{
	unsigned int i, num_pb;
	void __iomem *sfrbase = drvdata->sfrbase;

	__raw_writel(target_grp << 8, sfrbase + REG_PB_INDICATE);
	__raw_writel(0, sfrbase + REG_PB_LMM);
	SYSMMU_EVENT_LOG_PBLMM(SYSMMU_DRVDATA_TO_LOG(drvdata), 0, 0);

	num_pb = PB_INFO_NUM(__raw_readl(sfrbase + REG_PB_INFO));
	for (i = 0; i < num_pb; i++) {
		__raw_writel((target_grp << 8) | i, sfrbase + REG_PB_INDICATE);
		__raw_writel(0, sfrbase + REG_PB_CFG);
		SYSMMU_EVENT_LOG_PBSET(SYSMMU_DRVDATA_TO_LOG(drvdata), 0, 0, 0);
	}
}
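
/*
 * The PB "LMM" (buffer layout mode) is chosen from a preset table indexed
 * by the number of physical prefetch buffers and the number of requested
 * buffers; -1 marks combinations the hardware cannot provide.
 */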
static unsigned int find_lmm_preset(unsigned int num_pb, unsigned int num_bufs)
{
	static char lmm_preset[4][6] = {  /* [num of PB][num of buffers] */
	/*	  1,  2,  3,  4,  5,  6 */
		{ 1,  1,  0, -1, -1, -1}, /* num of pb: 3 */
		{ 3,  2,  1,  0, -1, -1}, /* num of pb: 4 */
		{-1, -1, -1, -1, -1, -1},
		{ 5,  5,  4,  2,  1,  0}, /* num of pb: 6 */
	};
	unsigned int lmm;

	BUG_ON(num_bufs > 6);
	lmm = lmm_preset[num_pb - 3][num_bufs - 1];
	return lmm;
}

static unsigned int find_num_pb(unsigned int num_pb, unsigned int lmm)
{
	static char lmm_preset[6][6] = { /* [pb_num - 1][pb_lmm] */
		/* ... */
	};

	num_pb = lmm_preset[num_pb - 1][lmm];
	return num_pb;
}
static void __sysmmu_set_pbuf(struct sysmmu_drvdata *drvdata, int target_grp,
			      struct sysmmu_prefbuf prefbuf[], int num_bufs)
{
	unsigned int i, num_pb, lmm;

	__raw_writel(target_grp << 8, drvdata->sfrbase + REG_PB_INDICATE);
	num_pb = PB_INFO_NUM(__raw_readl(drvdata->sfrbase + REG_PB_INFO));

	lmm = find_lmm_preset(num_pb, (unsigned int)num_bufs);
	num_pb = find_num_pb(num_pb, lmm);

	__raw_writel(lmm, drvdata->sfrbase + REG_PB_LMM);
	SYSMMU_EVENT_LOG_PBLMM(SYSMMU_DRVDATA_TO_LOG(drvdata), lmm, num_bufs);

	for (i = 0; i < num_pb; i++) {
		__raw_writel(target_grp << 8 | i,
			     drvdata->sfrbase + REG_PB_INDICATE);
		__raw_writel(0, drvdata->sfrbase + REG_PB_CFG);
		if ((prefbuf[i].size > 0) && (i < num_bufs)) {
			__raw_writel(prefbuf[i].base,
				     drvdata->sfrbase + REG_PB_START_ADDR);
			__raw_writel(prefbuf[i].size - 1 + prefbuf[i].base,
				     drvdata->sfrbase + REG_PB_END_ADDR);
			__raw_writel(prefbuf[i].config | 1,
				     drvdata->sfrbase + REG_PB_CFG);
			SYSMMU_EVENT_LOG_PBSET(SYSMMU_DRVDATA_TO_LOG(drvdata),
				prefbuf[i].config | 1, prefbuf[i].base,
				prefbuf[i].size - 1 + prefbuf[i].base);
		} else {
			if (i < num_bufs)
				dev_err(drvdata->sysmmu,
					"%s: Trying to init PB[%d/%d] with zero size\n",
					__func__, i, num_bufs);
			SYSMMU_EVENT_LOG_PBSET(SYSMMU_DRVDATA_TO_LOG(drvdata),
					       0, 0, 0);
		}
	}
}
static void __sysmmu_set_pbuf_axi_id(struct sysmmu_drvdata *drvdata,
				     struct pb_info *pb,
				     unsigned int ipoption[],
				     unsigned int opoption[])
{
	int i, j, num_pb, lmm;
	int ret_num_pb;
	unsigned int opt;
	int total_plane_num = pb->ar_id_num + pb->aw_id_num;

	if (total_plane_num <= 0)
		return;

	if (pb->grp_num < 0) {
		pr_err("The group number(%d) is invalid\n", pb->grp_num);
		return;
	}

	__raw_writel(pb->grp_num << 8, drvdata->sfrbase + REG_PB_INDICATE);
	num_pb = PB_INFO_NUM(__raw_readl(drvdata->sfrbase + REG_PB_INFO));

	lmm = find_lmm_preset(num_pb, total_plane_num);
	num_pb = find_num_pb(num_pb, lmm);

	__raw_writel(lmm, drvdata->sfrbase + REG_PB_LMM);

	ret_num_pb = min(pb->ar_id_num, num_pb);
	for (i = 0; i < ret_num_pb; i++) {
		__raw_writel((pb->grp_num << 8) | i,
			     drvdata->sfrbase + REG_PB_INDICATE);
		__raw_writel(0, drvdata->sfrbase + REG_PB_CFG);
		__raw_writel((0xFFFF << 16) | pb->ar_axi_id[i],
			     drvdata->sfrbase + REG_PB_AXI_ID);
		opt = ipoption ? ipoption[i] : SYSMMU_PBUFCFG_DEFAULT_INPUT;
		__raw_writel(opt | 0x100001,
			     drvdata->sfrbase + REG_PB_CFG);
	}

	if (num_pb > ret_num_pb) {
		for (i = ret_num_pb, j = 0; i < num_pb; i++, j++) {
			__raw_writel((pb->grp_num << 8) | i,
				     drvdata->sfrbase + REG_PB_INDICATE);
			__raw_writel(0, drvdata->sfrbase + REG_PB_CFG);
			__raw_writel((0xFFFF << 16) | pb->aw_axi_id[j],
				     drvdata->sfrbase + REG_PB_AXI_ID);
			opt = opoption ? opoption[i] : SYSMMU_PBUFCFG_DEFAULT_OUTPUT;
			__raw_writel(opt | 0x100001,
				     drvdata->sfrbase + REG_PB_CFG);
		}
	}
}
static void __sysmmu_set_pbuf_property(struct sysmmu_drvdata *drvdata,
				       struct pb_info *pb,
				       unsigned int ipoption[],
				       unsigned int opoption[])
{
	int i, num_pb, lmm;
	int ret_num_pb;
	unsigned int opt;
	int total_plane_num = pb->ar_id_num + pb->aw_id_num;

	if (total_plane_num <= 0)
		return;

	if (pb->grp_num < 0) {
		pr_err("The group number(%d) is invalid\n", pb->grp_num);
		return;
	}

	num_pb = PB_INFO_NUM(__raw_readl(drvdata->sfrbase + REG_PB_INFO));
	lmm = find_lmm_preset(num_pb, total_plane_num);
	num_pb = find_num_pb(num_pb, lmm);

	ret_num_pb = min(pb->ar_id_num, num_pb);
	for (i = 0; i < ret_num_pb; i++) {
		__raw_writel((pb->grp_num << 8) | i,
			     drvdata->sfrbase + REG_PB_INDICATE);
		opt = ipoption ? ipoption[i] : SYSMMU_PBUFCFG_DEFAULT_INPUT;
		__raw_writel(opt | 0x100001,
			     drvdata->sfrbase + REG_PB_CFG);
	}

	if (num_pb > ret_num_pb) {
		for (i = ret_num_pb; i < num_pb; i++) {
			__raw_writel((pb->grp_num << 8) | i,
				     drvdata->sfrbase + REG_PB_INDICATE);
			opt = opoption ? opoption[i] : SYSMMU_PBUFCFG_DEFAULT_OUTPUT;
			__raw_writel(opt | 0x100001,
				     drvdata->sfrbase + REG_PB_CFG);
		}
	}
}
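
/*
 * The helpers below pick, per master device, which prefetch buffers to
 * program: by explicit IOVA region, by AXI ID, or by PB property only.
 * A region is matched against the PB group's read/write property before
 * it is handed to __sysmmu_set_pbuf().
 */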
static void __exynos_sysmmu_set_prefbuf_by_region(
			struct sysmmu_drvdata *drvdata, struct device *dev,
			struct sysmmu_prefbuf pb_reg[], unsigned int num_reg)
{
	unsigned int i;
	struct pb_info *pb;
	int orig_num_reg, num_bufs = 0;
	struct sysmmu_prefbuf prefbuf[6];

	if (!has_sysmmu_capable_pbuf(drvdata->sfrbase))
		return;

	if ((num_reg == 0) || (pb_reg == NULL)) {
		/* Disabling prefetch buffers */
		__sysmmu_disable_pbuf(drvdata, -1);
		return;
	}

	orig_num_reg = num_reg;

	list_for_each_entry(pb, &drvdata->pb_grp_list, node) {
		if (pb->master == dev) {
			for (i = 0; i < orig_num_reg; i++) {
				if (((pb_reg[i].config & SYSMMU_PBUFCFG_WRITE) &&
					(pb->prop & SYSMMU_PROP_WRITE)) ||
				    (!(pb_reg[i].config & SYSMMU_PBUFCFG_WRITE) &&
					(pb->prop & SYSMMU_PROP_READ)))
					num_reg--;
				else if (num_reg == 0)
					break;
				else
					continue;
				prefbuf[num_bufs++] = pb_reg[i];
			}
			__sysmmu_set_pbuf(drvdata, pb->grp_num, prefbuf,
					  num_bufs);
		}
	}
}
static void __exynos_sysmmu_set_prefbuf_axi_id(struct sysmmu_drvdata *drvdata,
			struct device *master, unsigned int inplanes,
			unsigned int onplanes, unsigned int ipoption[],
			unsigned int opoption[])
{
	struct pb_info *pb;

	if (!has_sysmmu_capable_pbuf(drvdata->sfrbase))
		return;

	list_for_each_entry(pb, &drvdata->pb_grp_list, node) {
		if (pb->master == master) {
			struct pb_info tpb;

			memcpy(&tpb, pb, sizeof(tpb));
			tpb.ar_id_num = inplanes;
			tpb.aw_id_num = onplanes;
			__sysmmu_set_pbuf_axi_id(drvdata, &tpb,
						 ipoption, opoption);
		} else {
			__sysmmu_set_pbuf_axi_id(drvdata, pb,
						 NULL, NULL);
		}
	}
}
static void __exynos_sysmmu_set_prefbuf_property(struct sysmmu_drvdata *drvdata,
			struct device *master, unsigned int inplanes,
			unsigned int onplanes, unsigned int ipoption[],
			unsigned int opoption[])
{
	struct pb_info *pb;

	if (!has_sysmmu_capable_pbuf(drvdata->sfrbase))
		return;

	list_for_each_entry(pb, &drvdata->pb_grp_list, node) {
		if (pb->master == master) {
			struct pb_info tpb;

			memcpy(&tpb, pb, sizeof(tpb));
			tpb.ar_id_num = inplanes;
			tpb.aw_id_num = onplanes;
			__sysmmu_set_pbuf_property(drvdata, &tpb,
						   ipoption, opoption);
		}
	}
}
static void __sysmmu_set_df(void __iomem *sfrbase, dma_addr_t iova)
{
	__raw_writel(iova, sfrbase + REG_SW_DF_VPN);
}
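
/*
 * SW direct fetch (DF) preloads a translation for @iova by writing the
 * VPN to REG_SW_DF_VPN. It exists only on System MMU v5.1 and later;
 * with CONFIG_EXYNOS7_IOMMU_CHECK_DF the driver additionally verifies
 * that a non-locked L1 TLB slot and a free DF command queue entry exist.
 */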
void __exynos_sysmmu_set_df(struct sysmmu_drvdata *drvdata, dma_addr_t iova)
{
#ifdef CONFIG_EXYNOS7_IOMMU_CHECK_DF
	int i, num_l1tlb, df_cnt = 0;
	unsigned int cfg;
#endif

	if (MAKE_MMU_VER(5, 1) > __raw_sysmmu_version(drvdata->sfrbase)) {
		dev_err(drvdata->sysmmu, "%s: SW direct fetch not supported\n",
			__func__);
		return;
	}

#ifdef CONFIG_EXYNOS7_IOMMU_CHECK_DF
	num_l1tlb = MMU_NUM_L1TLB_ENTRIES(__raw_readl(drvdata->sfrbase +
						      REG_MMU_CAPA));
	for (i = 0; i < num_l1tlb; i++) {
		__raw_writel(i, drvdata->sfrbase + REG_L1TLB_READ_ENTRY);
		cfg = __raw_readl(drvdata->sfrbase + REG_L1TLB_ENTRY_ATTR);
		if (cfg & L1TLB_ATTR_IM)
			df_cnt++;
	}

	if (df_cnt == num_l1tlb) {
		dev_err(drvdata->sysmmu,
			"%s: All TLBs are special slots\n", __func__);
		return;
	}

	cfg = __raw_readl(drvdata->sfrbase + REG_SW_DF_VPN_CMD_NUM);
	if ((cfg & 0xFF) > 9) {
		dev_info(drvdata->sysmmu,
			 "%s: DF command queue is full\n", __func__);
		return;
	}
#endif

	__sysmmu_set_df(drvdata->sfrbase, iova);
	SYSMMU_EVENT_LOG_DF(SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
}
void __exynos_sysmmu_release_df(struct sysmmu_drvdata *drvdata)
{
	if (__raw_sysmmu_version(drvdata->sfrbase) >= MAKE_MMU_VER(5, 1)) {
		__raw_writel(0x1, drvdata->sfrbase + REG_L1TLB_CTRL);
		SYSMMU_EVENT_LOG_DF_UNLOCK_ALL(SYSMMU_DRVDATA_TO_LOG(drvdata));
	} else {
		dev_err(drvdata->sysmmu, "DF is not supported\n");
	}
}
void dump_sysmmu_tlb_pb(void __iomem *sfrbase)
{
	int i, j, capa, num_pb, lmm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_addr_t phys;

	pgd = pgd_offset_k((unsigned long)sfrbase);
	if (pgd_none(*pgd)) {
		pr_crit("Invalid virtual address %p\n", sfrbase);
		return;
	}

	pud = pud_offset(pgd, (unsigned long)sfrbase);
	if (pud_none(*pud)) {
		pr_crit("Invalid virtual address %p\n", sfrbase);
		return;
	}

	pmd = pmd_offset(pud, (unsigned long)sfrbase);
	if (pmd_none(*pmd)) {
		pr_crit("Invalid virtual address %p\n", sfrbase);
		return;
	}

	pte = pte_offset_kernel(pmd, (unsigned long)sfrbase);
	if (pte_none(*pte)) {
		pr_crit("Invalid virtual address %p\n", sfrbase);
		return;
	}

	capa = __raw_readl(sfrbase + REG_MMU_CAPA);
	lmm = MMU_RAW_VER(__raw_readl(sfrbase + REG_MMU_VERSION));

	phys = pte_pfn(*pte) << PAGE_SHIFT;
	pr_crit("ADDR: %pa(VA: %p), MMU_CTRL: %#010x, PT_BASE: %#010x\n",
		&phys, sfrbase,
		__raw_readl(sfrbase + REG_MMU_CTRL),
		__raw_readl(sfrbase + REG_PT_BASE_PPN));
	pr_crit("VERSION %d.%d, MMU_CFG: %#010x, MMU_STATUS: %#010x\n",
		MMU_MAJ_VER(lmm), MMU_MIN_VER(lmm),
		__raw_readl(sfrbase + REG_MMU_CFG),
		__raw_readl(sfrbase + REG_MMU_STATUS));

	if (MMU_HAVE_L2TLB(__raw_readl(sfrbase + REG_MMU_CAPA_1)))
		pr_crit("Level 2 TLB: %s\n",
			(__raw_readl(sfrbase + REG_L2TLB_CFG) == 1) ?
				"on" : "off");

	pr_crit("---------- Level 1 TLB -----------------------------------\n");
	for (i = 0; i < MMU_NUM_L1TLB_ENTRIES(capa); i++) {
		__raw_writel(i, sfrbase + REG_L1TLB_READ_ENTRY);
		pr_crit("[%02d] VPN: %#010x, PPN: %#010x, ATTR: %#010x\n",
			i, __raw_readl(sfrbase + REG_L1TLB_ENTRY_VPN),
			__raw_readl(sfrbase + REG_L1TLB_ENTRY_PPN),
			__raw_readl(sfrbase + REG_L1TLB_ENTRY_ATTR));
	}

	if (!has_sysmmu_capable_pbuf(sfrbase))
		return;

	pr_crit("---------- Prefetch Buffers ------------------------------\n");
	for (i = 0; i < MMU_PB_GRP_NUM(capa); i++) {
		__raw_writel(i << 8, sfrbase + REG_PB_INDICATE);
		num_pb = PB_INFO_NUM(__raw_readl(sfrbase + REG_PB_INFO));
		lmm = __raw_readl(sfrbase + REG_PB_LMM);
		pr_crit("PB_INFO[%d]: %#010x, PB_LMM: %#010x\n",
			i, num_pb, lmm);
		num_pb = find_num_pb(num_pb, lmm);
		for (j = 0; j < num_pb; j++) {
			__raw_writel((i << 8) | j, sfrbase + REG_PB_INDICATE);
			pr_crit("PB[%d][%d] = CFG: %#010x, AXI ID: %#010x, ", i,
				j, __raw_readl(sfrbase + REG_PB_CFG),
				__raw_readl(sfrbase + REG_PB_AXI_ID));
			pr_crit("PB[%d][%d] START: %#010x, END: %#010x\n", i, j,
				__raw_readl(sfrbase + REG_PB_START_ADDR),
				__raw_readl(sfrbase + REG_PB_END_ADDR));
			pr_crit("SPB0 START: %#010x, END: %#010x, VALID: %#010x\n",
				__raw_readl(sfrbase + REG_PCI_SPB0_SVPN),
				__raw_readl(sfrbase + REG_PCI_SPB0_EVPN),
				__raw_readl(sfrbase + REG_PCI_SPB0_SLOT_VALID));
			pr_crit("SPB1 START: %#010x, END: %#010x, VALID: %#010x\n",
				__raw_readl(sfrbase + REG_PCI_SPB1_SVPN),
				__raw_readl(sfrbase + REG_PCI_SPB1_EVPN),
				__raw_readl(sfrbase + REG_PCI_SPB1_SLOT_VALID));
		}
	}
}
static void show_fault_information(struct sysmmu_drvdata *drvdata,
				   int flags, unsigned long fault_addr)
{
	unsigned int info;
	phys_addr_t pgtable;
	int fault_id = SYSMMU_FAULT_ID(flags);
	sysmmu_pte_t *ent;

	pgtable = __raw_readl(drvdata->sfrbase + REG_PT_BASE_PPN);
	pgtable <<= PAGE_SHIFT;

	pr_crit("----------------------------------------------------------\n");
	pr_crit("%s %s %s at %#010lx (page table @ %pa)\n",
		dev_name(drvdata->sysmmu),
		(flags & IOMMU_FAULT_WRITE) ? "WRITE" : "READ",
		sysmmu_fault_name[fault_id], fault_addr, &pgtable);

	if (fault_id == SYSMMU_FAULT_UNKNOWN) {
		pr_crit("The fault is not caused by this System MMU.\n");
		pr_crit("Please check IRQ and SFR base address.\n");
		goto finish;
	}

	info = __raw_readl(drvdata->sfrbase +
			((flags & IOMMU_FAULT_WRITE) ?
			 REG_FAULT_AW_TRANS_INFO : REG_FAULT_AR_TRANS_INFO));
	pr_crit("AxID: %#x, AxLEN: %#x\n", info & 0xFFFF, (info >> 16) & 0xF);

	if (pgtable != drvdata->pgtable)
		pr_crit("Page table base of driver: %pa\n",
			&drvdata->pgtable);

	if (fault_id == SYSMMU_FAULT_PTW_ACCESS)
		pr_crit("System MMU has failed to access page table\n");

	if (!pfn_valid(pgtable >> PAGE_SHIFT)) {
		pr_crit("Page table base is not in a valid memory region\n");
		goto finish;
	}

	ent = section_entry(phys_to_virt(pgtable), fault_addr);
	pr_crit("Lv1 entry: %#010x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_crit("Lv2 entry: %#010x\n", *ent);
	}

	dump_sysmmu_tlb_pb(drvdata->sfrbase);

finish:
	pr_crit("----------------------------------------------------------\n");
}
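
/*
 * Fault interrupt decoding: REG_INT_STATUS carries read (AR) fault bits in
 * [15:0] and write (AW) fault bits from bit 16 upward, so a status bit at
 * or above REG_INT_STATUS_WRITE_BIT is reported as a write fault.
 */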
#define REG_INT_STATUS_WRITE_BIT 16

irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The System MMU is blocked when the interrupt occurred */
	struct sysmmu_drvdata *drvdata = dev_id;
	unsigned int itype;
	unsigned long addr = -1;
	int flags = 0;

	WARN(!is_sysmmu_active(drvdata),
	     "Fault occurred while System MMU %s is not enabled!\n",
	     dev_name(drvdata->sysmmu));

	itype = __ffs(__raw_readl(drvdata->sfrbase + REG_INT_STATUS));
	if (itype >= REG_INT_STATUS_WRITE_BIT) {
		itype -= REG_INT_STATUS_WRITE_BIT;
		flags = IOMMU_FAULT_WRITE;
	}

	if (WARN_ON(!(itype < SYSMMU_FAULT_UNKNOWN)))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(drvdata->sfrbase +
				((flags & IOMMU_FAULT_WRITE) ?
				 REG_FAULT_AW_ADDR : REG_FAULT_AR_ADDR));
	flags |= SYSMMU_FAULT_FLAG(itype);

	show_fault_information(drvdata, flags, addr);

	atomic_notifier_call_chain(&drvdata->fault_notifiers, addr, &flags);

#if 0 /* Recovering System MMU fault is available from System MMU v6 */
	if ((itype == SYSMMU_FAULT_PAGE_FAULT) ||
	    (itype == SYSMMU_FAULT_ACCESS)) {
		if (flags & IOMMU_FAULT_WRITE)
			itype += REG_INT_STATUS_WRITE_BIT;
		__raw_writel(1 << itype, drvdata->sfrbase + REG_INT_CLEAR);

		sysmmu_unblock(drvdata->sfrbase);

		return IRQ_HANDLED;
	}
#endif

	panic("Unrecoverable System MMU Fault!!");

	return IRQ_HANDLED;
}
void __sysmmu_init_config(struct sysmmu_drvdata *drvdata)
{
	unsigned long cfg = 0;

	__raw_writel(CTRL_BLOCK, drvdata->sfrbase + REG_MMU_CTRL);

	if (!(drvdata->prop & SYSMMU_PROP_DISABLE_ACG))
		cfg |= CFG_ACGEN;

	if (drvdata->qos >= 0)
		cfg |= CFG_QOS_OVRRIDE | CFG_QOS(drvdata->qos);

	if (has_sysmmu_capable_pbuf(drvdata->sfrbase))
		__exynos_sysmmu_set_prefbuf_axi_id(drvdata, NULL, 0, 0,
						   NULL, NULL);

	cfg |= __raw_readl(drvdata->sfrbase + REG_MMU_CFG) & ~CFG_MASK;
	__raw_writel(cfg, drvdata->sfrbase + REG_MMU_CFG);
}
void sysmmu_tlb_invalidate_flpdcache(struct device *dev, dma_addr_t iova)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		unsigned long flags;
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);
		if (is_sysmmu_active(drvdata) &&
		    is_sysmmu_runtime_active(drvdata)) {
			TRACE_LOG_DEV(drvdata->sysmmu,
				      "FLPD invalidation @ %#x\n", iova);
			__master_clk_enable(drvdata);
			__sysmmu_tlb_invalidate_flpdcache(
					drvdata->sfrbase, iova);
			SYSMMU_EVENT_LOG_FLPD_FLUSH(
					SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
			__master_clk_disable(drvdata);
		} else {
			TRACE_LOG_DEV(drvdata->sysmmu,
				      "Skip FLPD invalidation @ %#x\n", iova);
		}
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}
static void sysmmu_tlb_invalidate_entry(struct device *dev, dma_addr_t iova,
					bool force)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		unsigned long flags;
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		if (!force && !(drvdata->prop & SYSMMU_PROP_NONBLOCK_TLBINV))
			continue;

		spin_lock_irqsave(&drvdata->lock, flags);
		if (is_sysmmu_active(drvdata) &&
		    is_sysmmu_runtime_active(drvdata)) {
			TRACE_LOG_DEV(drvdata->sysmmu,
				      "TLB invalidation @ %#x\n", iova);
			__master_clk_enable(drvdata);
			__sysmmu_tlb_invalidate_entry(drvdata->sfrbase, iova);
			SYSMMU_EVENT_LOG_TLB_INV_VPN(
					SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
			__master_clk_disable(drvdata);
		} else {
			TRACE_LOG_DEV(drvdata->sysmmu,
				      "Skip TLB invalidation @ %#x\n", iova);
		}
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}
void exynos_sysmmu_tlb_invalidate(struct iommu_domain *domain,
				  dma_addr_t start, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	struct sysmmu_list_data *list;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(owner, &priv->clients, client) {
		for_each_sysmmu_list(owner->dev, list) {
			struct sysmmu_drvdata *drvdata =
					dev_get_drvdata(list->sysmmu);

			if (!!(drvdata->prop & SYSMMU_PROP_NONBLOCK_TLBINV))
				continue;

			spin_lock(&drvdata->lock);
			if (!is_sysmmu_active(drvdata) ||
			    !is_sysmmu_runtime_active(drvdata)) {
				spin_unlock(&drvdata->lock);
				TRACE_LOG_DEV(drvdata->sysmmu,
					"Skip TLB invalidation %#x@%#x\n",
					size, start);
				continue;
			}

			TRACE_LOG_DEV(drvdata->sysmmu,
				      "TLB invalidation %#x@%#x\n",
				      size, start);

			__master_clk_enable(drvdata);

			__sysmmu_tlb_invalidate(drvdata, start, size);

			__master_clk_disable(drvdata);

			spin_unlock(&drvdata->lock);
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
static inline void __sysmmu_disable_nocount(struct sysmmu_drvdata *drvdata)
{
	int disable = (drvdata->prop & SYSMMU_PROP_STOP_BLOCK) ?
					CTRL_BLOCK_DISABLE : CTRL_DISABLE;

	__raw_sysmmu_disable(drvdata->sfrbase, disable);

	__sysmmu_clk_disable(drvdata);
	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU_NO_MASTER_CLKGATE))
		__master_clk_disable(drvdata);

	SYSMMU_EVENT_LOG_DISABLE(SYSMMU_DRVDATA_TO_LOG(drvdata));

	TRACE_LOG("%s(%s)\n", __func__, dev_name(drvdata->sysmmu));
}
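
/*
 * Enabling and disabling is reference counted: set_sysmmu_inactive() only
 * reports true on the final release, and the SFRs are touched only when
 * the System MMU is also runtime active.
 */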
static bool __sysmmu_disable(struct sysmmu_drvdata *drvdata)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&drvdata->lock, flags);

	disabled = set_sysmmu_inactive(drvdata);

	if (disabled) {
		drvdata->pgtable = 0;
		drvdata->domain = NULL;

		if (is_sysmmu_runtime_active(drvdata)) {
			__master_clk_enable(drvdata);
			__sysmmu_disable_nocount(drvdata);
			__master_clk_disable(drvdata);
		}

		TRACE_LOG_DEV(drvdata->sysmmu, "Disabled\n");
	} else {
		TRACE_LOG_DEV(drvdata->sysmmu, "%d times left to disable\n",
			      drvdata->activations);
	}

	spin_unlock_irqrestore(&drvdata->lock, flags);

	return disabled;
}
*drvdata
)
918 if (IS_ENABLED(CONFIG_EXYNOS_IOMMU_NO_MASTER_CLKGATE
))
919 __master_clk_enable(drvdata
);
921 __sysmmu_clk_enable(drvdata
);
923 __sysmmu_init_config(drvdata
);
925 __sysmmu_set_ptbase(drvdata
->sfrbase
, drvdata
->pgtable
/ PAGE_SIZE
);
927 __raw_sysmmu_enable(drvdata
->sfrbase
);
929 SYSMMU_EVENT_LOG_ENABLE(SYSMMU_DRVDATA_TO_LOG(drvdata
));
931 TRACE_LOG_DEV(drvdata
->sysmmu
, "Really enabled\n");
static int __sysmmu_enable(struct sysmmu_drvdata *drvdata,
			   phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&drvdata->lock, flags);
	if (set_sysmmu_active(drvdata)) {
		drvdata->pgtable = pgtable;
		drvdata->domain = domain;

		if (is_sysmmu_runtime_active(drvdata)) {
			__master_clk_enable(drvdata);
			__sysmmu_enable_nocount(drvdata);
			__master_clk_disable(drvdata);
		}

		TRACE_LOG_DEV(drvdata->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == drvdata->pgtable) ? 1 : -EBUSY;

		TRACE_LOG_DEV(drvdata->sysmmu, "Already enabled (%d)\n", ret);
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(drvdata); /* decrement count */

	spin_unlock_irqrestore(&drvdata->lock, flags);

	return ret;
}
/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns -error if an error occurred and the System MMU was not enabled,
 * 0 if the System MMU has just been enabled and 1 if it was already
 * enabled.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		ret = __sysmmu_enable(drvdata, pgtable, domain);
		if (ret < 0) {
			struct sysmmu_list_data *iter;

			for_each_sysmmu_list(dev, iter) {
				if (iter == list)
					break;
				__sysmmu_disable(dev_get_drvdata(iter->sysmmu));
			}
			break;
		}
	}

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}
int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = __exynos_sysmmu_enable(dev, pgtable, NULL);

	return ret;
}
*dev
)
1016 unsigned long flags
;
1017 bool disabled
= true;
1018 struct exynos_iommu_owner
*owner
= dev
->archdata
.iommu
;
1019 struct sysmmu_list_data
*list
;
1021 BUG_ON(!has_sysmmu(dev
));
1023 spin_lock_irqsave(&owner
->lock
, flags
);
1025 /* Every call to __sysmmu_disable() must return same result */
1026 for_each_sysmmu_list(dev
, list
) {
1027 struct sysmmu_drvdata
*drvdata
= dev_get_drvdata(list
->sysmmu
);
1028 disabled
= __sysmmu_disable(drvdata
);
1031 spin_unlock_irqrestore(&owner
->lock
, flags
);
#ifdef CONFIG_EXYNOS_IOMMU_RECOVER_FAULT_HANDLER
int recover_fault_handler(struct iommu_domain *domain,
			  struct device *dev, unsigned long fault_addr,
			  int itype, void *reserved)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	if (itype == SYSMMU_PAGEFAULT) {
		struct exynos_iovmm *vmm_data;
		sysmmu_pte_t *sent;
		sysmmu_pte_t *pent;

		BUG_ON(priv->pgtable == NULL);

		sent = section_entry(priv->pgtable, fault_addr);
		if (!lv1ent_page(sent)) {
			pent = kmem_cache_zalloc(lv2table_kmem_cache,
						 GFP_ATOMIC);
			if (!pent)
				return -ENOMEM;
			*sent = mk_lv1ent_page(__pa(pent));
			pgtable_flush(sent, sent + 1);
		}

		pent = page_entry(sent, fault_addr);
		if (lv2ent_fault(pent)) {
			*pent = mk_lv2ent_spage(fault_page);
			pgtable_flush(pent, pent + 1);
		} else {
			pr_err("[%s] 0x%lx by '%s' is already mapped\n",
			       sysmmu_fault_name[itype], fault_addr,
			       dev_name(dev));

			owner = dev->archdata.iommu;
			vmm_data = (struct exynos_iovmm *)owner->vmm_data;
			if (find_iovm_region(vmm_data, fault_addr))
				pr_err("[%s] 0x%lx by '%s' is remapped\n",
				       sysmmu_fault_name[itype],
				       fault_addr, dev_name(dev));
			else
				pr_err("[%s] '%s' accessed unmapped address(0x%lx)\n",
				       sysmmu_fault_name[itype], dev_name(dev),
				       fault_addr);
		}
	} else if (itype == SYSMMU_L1TLB_MULTIHIT) {
		spin_lock_irqsave(&priv->lock, flags);
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_entry(owner->dev,
					(dma_addr_t)fault_addr, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		pr_err("[%s] occurred at 0x%lx by '%s'\n",
		       sysmmu_fault_name[itype], fault_addr, dev_name(dev));
		return -ENOSYS;
	}

	return 0;
}
#else
int recover_fault_handler(struct iommu_domain *domain,
			  struct device *dev, unsigned long fault_addr,
			  int itype, void *reserved)
{
	return -ENOSYS;
}
#endif
void sysmmu_set_prefetch_buffer_by_region(struct device *dev,
			struct sysmmu_prefbuf pb_reg[], unsigned int num_reg)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	if (!dev->archdata.iommu) {
		dev_err(dev, "%s: No System MMU is configured\n", __func__);
		return;
	}

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);

		if (!is_sysmmu_active(drvdata) ||
		    !is_sysmmu_runtime_active(drvdata)) {
			spin_unlock(&drvdata->lock);
			continue;
		}

		__master_clk_enable(drvdata);

		if (sysmmu_block(drvdata->sfrbase)) {
			__exynos_sysmmu_set_prefbuf_by_region(
					drvdata, dev, pb_reg, num_reg);
			sysmmu_unblock(drvdata->sfrbase);
		}

		__master_clk_disable(drvdata);

		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}
int sysmmu_set_prefetch_buffer_by_plane(struct device *dev,
			unsigned int inplanes, unsigned int onplanes,
			unsigned int ipoption[], unsigned int opoption[])
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	if (!dev->archdata.iommu) {
		dev_err(dev, "%s: No System MMU is configured\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);

		if (!is_sysmmu_active(drvdata) ||
		    !is_sysmmu_runtime_active(drvdata)) {
			spin_unlock(&drvdata->lock);
			continue;
		}

		__master_clk_enable(drvdata);

		if (sysmmu_block(drvdata->sfrbase)) {
			__exynos_sysmmu_set_prefbuf_axi_id(drvdata, dev,
					inplanes, onplanes, ipoption, opoption);
			sysmmu_unblock(drvdata->sfrbase);
		}

		__master_clk_disable(drvdata);

		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);

	return 0;
}
int sysmmu_set_prefetch_buffer_property(struct device *dev,
			unsigned int inplanes, unsigned int onplanes,
			unsigned int ipoption[], unsigned int opoption[])
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	if (!dev->archdata.iommu) {
		dev_err(dev, "%s: No System MMU is configured\n", __func__);
		return -ENODEV;
	}

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);

		if (!is_sysmmu_active(drvdata) ||
		    !is_sysmmu_runtime_active(drvdata)) {
			spin_unlock(&drvdata->lock);
			continue;
		}

		__master_clk_enable(drvdata);
		__exynos_sysmmu_set_prefbuf_property(drvdata, dev,
				inplanes, onplanes, ipoption, opoption);
		__master_clk_disable(drvdata);

		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);

	return 0;
}
static void __sysmmu_set_ptwqos(struct sysmmu_drvdata *data)
{
	unsigned long cfg;

	if (!sysmmu_block(data->sfrbase))
		return;

	cfg = __raw_readl(data->sfrbase + REG_MMU_CFG);
	cfg &= ~CFG_QOS(15); /* clearing the PTW_QOS field */

	/*
	 * PTW_QOS of System MMU 1.x ~ 3.x is always overridable
	 * in __sysmmu_init_config().
	 */
	if (__raw_sysmmu_version(data->sfrbase) < MAKE_MMU_VER(5, 0))
		cfg |= CFG_QOS(data->qos);
	else if (data->qos >= 0)
		cfg |= CFG_QOS_OVRRIDE | CFG_QOS(data->qos);
	else
		cfg &= ~CFG_QOS_OVRRIDE;

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	sysmmu_unblock(data->sfrbase);
}
static void __sysmmu_set_qos(struct device *dev, unsigned int qosval)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *data;

		data = dev_get_drvdata(list->sysmmu);
		spin_lock(&data->lock);
		data->qos = (short)qosval;
		if (is_sysmmu_really_enabled(data)) {
			__master_clk_enable(data);
			__sysmmu_set_ptwqos(data);
			__master_clk_disable(data);
		}
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}
void sysmmu_set_qos(struct device *dev, unsigned int qos)
{
	__sysmmu_set_qos(dev, (qos > 15) ? 15 : qos);
}

void sysmmu_reset_qos(struct device *dev)
{
	__sysmmu_set_qos(dev, DEFAULT_QOS_VALUE);
}
void exynos_sysmmu_set_df(struct device *dev, dma_addr_t iova)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;
	struct exynos_iovmm *vmm;
	int plane;

	BUG_ON(!has_sysmmu(dev));

	vmm = exynos_get_iovmm(dev);
	if (!vmm) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	plane = find_iovmm_plane(vmm, iova);
	if (plane < 0) {
		dev_err(dev, "%s: IOVA %pa is out of IOVMM\n", __func__, &iova);
		return;
	}

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);

		if (is_sysmmu_active(drvdata) &&
		    is_sysmmu_runtime_active(drvdata)) {
			__master_clk_enable(drvdata);
			if (drvdata->prop & SYSMMU_PROP_WINDOW_MASK) {
				unsigned long prop;

				prop = drvdata->prop & SYSMMU_PROP_WINDOW_MASK;
				prop >>= SYSMMU_PROP_WINDOW_SHIFT;
				if (prop & (1 << plane))
					__exynos_sysmmu_set_df(drvdata, iova);
			} else {
				__exynos_sysmmu_set_df(drvdata, iova);
			}
			__master_clk_disable(drvdata);
		}
		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}
void exynos_sysmmu_release_df(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);
		if (is_sysmmu_active(drvdata) &&
		    is_sysmmu_runtime_active(drvdata)) {
			__master_clk_enable(drvdata);
			__exynos_sysmmu_release_df(drvdata);
			__master_clk_disable(drvdata);
		}
		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}
static int __init __sysmmu_init_clock(struct device *sysmmu,
				      struct sysmmu_drvdata *drvdata)
{
	int i;
	int ret;

	/* Initialize SYSMMU clocks */
	for (i = 0; i < SYSMMU_CLK_NUM; i++)
		drvdata->clocks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < SYSMMU_CLK_NUM; i++) {
		drvdata->clocks[i] =
			devm_clk_get(sysmmu, sysmmu_clock_names[i]);
		if (IS_ERR(drvdata->clocks[i]) &&
		    !(drvdata->clocks[i] == ERR_PTR(-ENOENT))) {
			dev_err(sysmmu, "Failed to get sysmmu %s clock\n",
				sysmmu_clock_names[i]);
			return PTR_ERR(drvdata->clocks[i]);
		} else if (drvdata->clocks[i] == ERR_PTR(-ENOENT)) {
			continue;
		}

		ret = clk_prepare(drvdata->clocks[i]);
		if (ret) {
			dev_err(sysmmu, "Failed to prepare sysmmu %s clock\n",
				sysmmu_clock_names[i]);
			while (i-- > 0) {
				if (!IS_ERR(drvdata->clocks[i]))
					clk_unprepare(drvdata->clocks[i]);
			}
			return ret;
		}
	}

	return 0;
}
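
/*
 * Parses one "pb-info" child node from the device tree into a pb_info
 * record: AR/AW AXI IDs from "master_axi_id_list" and the read/write
 * direction from the optional "dir" string property ("r", "w" or "rw").
 */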
int __sysmmu_init_pb_info(struct device *sysmmu, struct device *master,
			  struct sysmmu_drvdata *data,
			  struct device_node *pb_node,
			  struct of_phandle_args *pb_args, int grp_num)
{
	struct pb_info *pb;
	const char *s;
	int i, ret;

	pb = devm_kzalloc(sysmmu, sizeof(*pb), GFP_KERNEL);
	if (!pb) {
		dev_err(sysmmu, "%s: failed to allocate pb_info[%d]\n",
			__func__, grp_num);
		return -ENOMEM;
	}

	pb->master = master;
	pb->grp_num = grp_num;
	for (i = 0; i < MAX_NUM_PBUF; i++) {
		pb->ar_axi_id[i] = -1;
		pb->aw_axi_id[i] = -1;
	}

	INIT_LIST_HEAD(&pb->node);

	for (i = 0; i < pb_args->args_count; i++) {
		if (is_axi_id(pb_args->args[i])) {
			if (is_ar_axi_id(pb_args->args[i])) {
				pb->ar_axi_id[pb->ar_id_num] = pb_args->args[i];
				pb->ar_id_num++;
			} else {
				pb->aw_axi_id[pb->aw_id_num] =
					pb_args->args[i] & AXIID_MASK;
				pb->aw_id_num++;
			}
		}
	}

	ret = of_property_read_string(pb_node, "dir", &s);
	if (!ret) {
		int val;

		for (val = 1; val < ARRAY_SIZE(sysmmu_prop_opts); val++) {
			if (!strcasecmp(s, sysmmu_prop_opts[val])) {
				pb->prop &= ~SYSMMU_PROP_RW_MASK;
				pb->prop |= val;
				break;
			}
		}
	} else if (ret == -EINVAL) {
		pb->prop = SYSMMU_PROP_READWRITE;
	} else {
		dev_err(sysmmu, "%s: failed to get PB Direction of %s\n",
			__func__, pb_args->np->full_name);
		devm_kfree(sysmmu, pb);
		return ret;
	}

	list_add_tail(&pb->node, &data->pb_grp_list);

	dev_info(sysmmu, "device node[%d] : %s\n",
		 pb->grp_num, pb_args->np->name);
	dev_info(sysmmu, "ar[%d] = {%d, %d, %d, %d, %d, %d}\n",
		 pb->ar_id_num,
		 pb->ar_axi_id[0], pb->ar_axi_id[1],
		 pb->ar_axi_id[2], pb->ar_axi_id[3],
		 pb->ar_axi_id[4], pb->ar_axi_id[5]);
	dev_info(sysmmu, "aw[%d] = {%d, %d, %d, %d, %d, %d}\n",
		 pb->aw_id_num,
		 pb->aw_axi_id[0], pb->aw_axi_id[1],
		 pb->aw_axi_id[2], pb->aw_axi_id[3],
		 pb->aw_axi_id[4], pb->aw_axi_id[5]);

	return 0;
}
int __sysmmu_update_owner(struct device *master, struct device *sysmmu)
{
	struct exynos_iommu_owner *owner;
	struct sysmmu_list_data *list_data;

	owner = master->archdata.iommu;
	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			dev_err(master,
				"%s: Failed to allocate owner structure\n",
				__func__);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->mmu_list);
		INIT_LIST_HEAD(&owner->client);
		owner->dev = master;
		spin_lock_init(&owner->lock);

		master->archdata.iommu = owner;
		if (!sysmmu_owner_list) {
			sysmmu_owner_list = owner;
		} else {
			owner->next = sysmmu_owner_list->next;
			sysmmu_owner_list->next = owner;
		}
	}

	list_for_each_entry(list_data, &owner->mmu_list, node)
		if (list_data->sysmmu == sysmmu)
			return 0;

	list_data = devm_kzalloc(sysmmu, sizeof(*list_data), GFP_KERNEL);
	if (!list_data) {
		dev_err(sysmmu, "%s: Failed to allocate sysmmu_list_data\n",
			__func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&list_data->node);
	list_data->sysmmu = sysmmu;

	/*
	 * System MMUs are attached in the order of the presence
	 * of their device nodes in the device tree.
	 */
	list_add_tail(&list_data->node, &owner->mmu_list);
	dev_info(master, "--> %s\n", dev_name(sysmmu));

	return 0;
}
static struct platform_device * __init __sysmmu_init_owner(
					struct device *sysmmu,
					struct sysmmu_drvdata *data,
					struct of_phandle_args *pb_args)
{
	struct device_node *master_node = pb_args->np;
	struct platform_device *master;
	int ret;

	master = of_find_device_by_node(master_node);
	if (!master) {
		pr_err("%s: failed to get master device in '%s'\n",
		       __func__, master_node->full_name);
		return ERR_PTR(-EINVAL);
	}

	ret = __sysmmu_update_owner(&master->dev, sysmmu);
	if (ret) {
		pr_err("%s: failed to update iommu owner '%s'\n",
		       __func__, dev_name(&master->dev));
		of_node_put(master_node);
		return ERR_PTR(-EINVAL);
	}

	of_node_put(master_node);

	return master;
}
static int __init __sysmmu_init_master_info(struct device *sysmmu,
					    struct sysmmu_drvdata *data)
{
	struct device_node *node;
	struct device_node *pb_info;
	struct pb_info *pb;
	int grp_num = 0;
	int ret = 0;

	pb_info = of_get_child_by_name(sysmmu->of_node, "pb-info");
	if (!pb_info) {
		pr_info("%s: 'pb-info' node not found from '%s' node\n",
			__func__, dev_name(sysmmu));
		return 0;
	}

	INIT_LIST_HEAD(&data->pb_grp_list);

	for_each_child_of_node(pb_info, node) {
		struct of_phandle_args pb_args;
		struct platform_device *master;
		int i, master_cnt = 0;

		master_cnt = of_count_phandle_with_args(node,
					"master_axi_id_list",
					"#pb-id-cells");

		for (i = 0; i < master_cnt; i++) {
			memset(&pb_args, 0x0, sizeof(pb_args));
			ret = of_parse_phandle_with_args(node,
					"master_axi_id_list",
					"#pb-id-cells", i, &pb_args);
			if (ret) {
				pr_err("%s: failed to get PB info of %s\n",
				       __func__, dev_name(data->sysmmu));
				goto err;
			}

			master = __sysmmu_init_owner(sysmmu, data, &pb_args);
			if (IS_ERR(master)) {
				of_node_put(pb_args.np);
				pr_err("%s: failed to initialize sysmmu(%s) owner info\n",
				       __func__, dev_name(data->sysmmu));
				return PTR_ERR(master);
			}

			ret = __sysmmu_init_pb_info(sysmmu, &master->dev, data,
						    node, &pb_args, grp_num);
			if (ret) {
				of_node_put(pb_args.np);
				pr_err("%s: failed to update pb axi id '%s'\n",
				       __func__, dev_name(sysmmu));
				goto err;
			}

			of_node_put(pb_args.np);
		}
		grp_num++;
	}

	return 0;
err:
	while (!list_empty(&data->pb_grp_list)) {
		pb = list_entry(data->pb_grp_list.next,
				struct pb_info, node);
		list_del(&pb->node);
		devm_kfree(sysmmu, pb);
	}
	return ret;
}
static int __init __sysmmu_init_prop(struct device *sysmmu,
				     struct sysmmu_drvdata *drvdata)
{
	struct device_node *prop_node;
	const char *s;
	unsigned int qos = DEFAULT_QOS_VALUE;
	int ret;

	ret = of_property_read_u32_index(sysmmu->of_node, "qos", 0, &qos);

	if ((ret == 0) && (qos > 15)) {
		dev_err(sysmmu, "%s: Invalid QoS value %d specified\n",
			__func__, qos);
		qos = DEFAULT_QOS_VALUE;
	}

	drvdata->qos = (short)qos;

	/*
	 * Deprecate the 'prop-map' child node of System MMU device nodes in
	 * FDT. It is not required to introduce a new child node for boolean
	 * properties like 'block-stop' and 'tlbinv-nonblock'.
	 * 'tlbinv-nonblock' is a H/W workaround that accelerates master H/W
	 * performance for 5.x and earlier versions of System MMU.
	 * 'sysmmu,tlbinv-nonblock' is introduced instead for those earlier
	 * versions.
	 * Instead of 'block-stop' in the 'prop-map' child node,
	 * 'sysmmu,block-when-stop' without a value is introduced to simplify
	 * the FDT node definitions.
	 * For compatibility with the existing FDT files, the 'prop-map'
	 * child node parsing is still kept.
	 */
	prop_node = of_get_child_by_name(sysmmu->of_node, "prop-map");
	if (prop_node) {
		if (!of_property_read_string(prop_node, "tlbinv-nonblock", &s))
			if (strnicmp(s, "yes", 3) == 0)
				drvdata->prop |= SYSMMU_PROP_NONBLOCK_TLBINV;

		if (!of_property_read_string(prop_node, "block-stop", &s))
			if (strnicmp(s, "yes", 3) == 0)
				drvdata->prop |= SYSMMU_PROP_STOP_BLOCK;

		of_node_put(prop_node);
	}

	if (of_find_property(sysmmu->of_node, "sysmmu,block-when-stop", NULL))
		drvdata->prop |= SYSMMU_PROP_STOP_BLOCK;

	if (of_find_property(sysmmu->of_node, "sysmmu,tlbinv-nonblock", NULL))
		drvdata->prop |= SYSMMU_PROP_NONBLOCK_TLBINV;

	if (of_find_property(sysmmu->of_node, "sysmmu,acg_disable", NULL))
		drvdata->prop |= SYSMMU_PROP_DISABLE_ACG;

	return 0;
}
static int __init __sysmmu_setup(struct device *sysmmu,
				 struct sysmmu_drvdata *drvdata)
{
	int i, ret;

	ret = __sysmmu_init_prop(sysmmu, drvdata);
	if (ret) {
		dev_err(sysmmu, "Failed to initialize sysmmu properties\n");
		return ret;
	}

	ret = __sysmmu_init_clock(sysmmu, drvdata);
	if (ret) {
		dev_err(sysmmu, "Failed to initialize gating clocks\n");
		return ret;
	}

	ret = __sysmmu_init_master_info(sysmmu, drvdata);
	if (ret) {
		for (i = 0; i < SYSMMU_CLK_NUM; i++) {
			if (!IS_ERR(drvdata->clocks[i]))
				clk_unprepare(drvdata->clocks[i]);
		}
		dev_err(sysmmu, "Failed to initialize master device.\n");
	}

	return ret;
}
exynos_sysmmu_probe(struct platform_device
*pdev
)
1724 struct device
*dev
= &pdev
->dev
;
1725 struct sysmmu_drvdata
*data
;
1726 struct resource
*res
;
1728 data
= devm_kzalloc(dev
, sizeof(*data
) , GFP_KERNEL
);
1730 dev_err(dev
, "Not enough memory\n");
1734 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1736 dev_err(dev
, "Unable to find IOMEM region\n");
1740 data
->sfrbase
= devm_request_and_ioremap(dev
, res
);
1741 if (!data
->sfrbase
) {
1742 dev_err(dev
, "Unable to map IOMEM @ PA:%pa\n", &res
->start
);
1746 ret
= platform_get_irq(pdev
, 0);
1748 dev_err(dev
, "Unable to find IRQ resource\n");
1752 ret
= devm_request_irq(dev
, ret
, exynos_sysmmu_irq
, 0,
1753 dev_name(dev
), data
);
1755 dev_err(dev
, "Unabled to register interrupt handler\n");
1759 pm_runtime_enable(dev
);
1761 ret
= exynos_iommu_init_event_log(SYSMMU_DRVDATA_TO_LOG(data
),
1764 sysmmu_add_log_to_debugfs(exynos_sysmmu_debugfs_root
,
1765 SYSMMU_DRVDATA_TO_LOG(data
), dev_name(dev
));
1769 ret
= __sysmmu_setup(dev
, data
);
1771 if (!pm_runtime_enabled(dev
))
1772 get_sysmmu_runtime_active(data
);
1774 ATOMIC_INIT_NOTIFIER_HEAD(&data
->fault_notifiers
);
1775 spin_lock_init(&data
->lock
);
1776 if (!sysmmu_drvdata_list
) {
1777 sysmmu_drvdata_list
= data
;
1779 data
->next
= sysmmu_drvdata_list
->next
;
1780 sysmmu_drvdata_list
->next
= data
;
1783 platform_set_drvdata(pdev
, data
);
1785 dev_info(dev
, "[OK]\n");
static struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos7420-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= MODULE_NAME,
		.of_match_table	= of_match_ptr(sysmmu_of_match),
	}
};
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (atomic_t *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->lv2entcnt)
		goto err_counter;

	if (exynos_iommu_init_event_log(IOMMU_PRIV_TO_LOG(priv), IOMMU_LOG_LEN))
		goto err_init_event_log;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->priv = priv;
	domain->handler = recover_fault_handler;

	return 0;

err_init_event_log:
	free_pages((unsigned long)priv->lv2entcnt, 2);
err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				__va(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 2);
	kfree(domain->priv);
	domain->priv = NULL;
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pgtable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, __pa(priv->pgtable), domain);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pgtable);
	} else {
		SYSMMU_EVENT_LOG_IOMMU_ATTACH(IOMMU_PRIV_TO_LOG(priv), dev);
		TRACE_LOG_DEV(dev,
			"%s: Attached new IOMMU with pgtable %pa %s\n",
			__func__, &pgtable, (ret == 0) ? "" : ", again");
	}

	return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			exynos_sysmmu_disable(owner->dev);
			list_del_init(&owner->client);
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu) {
		SYSMMU_EVENT_LOG_IOMMU_DETACH(IOMMU_PRIV_TO_LOG(priv), dev);
		TRACE_LOG_DEV(dev, "%s: Detached IOMMU with pgtable %#lx\n",
			      __func__, __pa(priv->pgtable));
	} else {
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
	}
}
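
/*
 * Allocates a second-level page table for @iova on demand. The fault check
 * is repeated under pgtablelock (double-checked locking) so that two
 * concurrent mappers cannot both install a table for the same entry.
 */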
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, unsigned long iova, atomic_t *pgcounter)
{
	if (lv1ent_fault(sent)) {
		unsigned long flags;
		sysmmu_pte_t *pent;

		spin_lock_irqsave(&priv->pgtablelock, flags);
		if (lv1ent_fault(sent)) {
			pent = kmem_cache_zalloc(lv2table_kmem_cache,
						 GFP_ATOMIC);
			BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
			if (!pent) {
				spin_unlock_irqrestore(&priv->pgtablelock,
						       flags);
				return ERR_PTR(-ENOMEM);
			}

			*sent = mk_lv1ent_page(__pa(pent));
			kmemleak_ignore(pent);
			atomic_set(pgcounter, NUM_LV2ENTRIES);
			pgtable_flush(pent, pent + NUM_LV2ENTRIES);
			pgtable_flush(sent, sent + 1);
			SYSMMU_EVENT_LOG_IOMMU_ALLOCSLPD(IOMMU_PRIV_TO_LOG(priv),
					iova_from_sent(priv->pgtable, sent));
		}
		spin_unlock_irqrestore(&priv->pgtablelock, flags);
	} else if (!lv1ent_page(sent)) {
		return ERR_PTR(-EADDRINUSE);
	}

	return page_entry(sent, iova);
}
static int lv1ent_check_page(struct exynos_iommu_domain *priv,
			     sysmmu_pte_t *sent, atomic_t *pgcnt)
{
	if (lv1ent_page(sent)) {
		if (WARN_ON(atomic_read(pgcnt) != NUM_LV2ENTRIES))
			return -EADDRINUSE;

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		atomic_set(pgcnt, 0);
	}

	return 0;
}

static void clear_lv1_page_table(sysmmu_pte_t *ent, int n)
{
	memset(ent, 0, sizeof(*ent) * n);
}

static void clear_lv2_page_table(sysmmu_pte_t *ent, int n)
{
	memset(ent, 0, sizeof(*ent) * n);
}
static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, phys_addr_t paddr,
			  size_t size, atomic_t *pgcnt)
{
	int ret;
	int i;

	if (WARN_ON(!lv1ent_fault(sent) && !lv1ent_page(sent)))
		return -EADDRINUSE;

	if (size == SECT_SIZE) {
		ret = lv1ent_check_page(priv, sent, pgcnt);
		if (ret)
			return ret;
		*sent = mk_lv1ent_sect(paddr);
		pgtable_flush(sent, sent + 1);
	} else if (size == DSECT_SIZE) {
		for (i = 0; i < SECT_PER_DSECT; i++, sent++, pgcnt++) {
			ret = lv1ent_check_page(priv, sent, pgcnt);
			if (ret) {
				clear_lv1_page_table(sent - i, i);
				return ret;
			}
			*sent = mk_lv1ent_dsect(paddr);
		}
		pgtable_flush(sent - SECT_PER_DSECT, sent);
	} else {
		for (i = 0; i < SECT_PER_SPSECT; i++, sent++, pgcnt++) {
			ret = lv1ent_check_page(priv, sent, pgcnt);
			if (ret) {
				clear_lv1_page_table(sent - i, i);
				return ret;
			}
			*sent = mk_lv1ent_spsect(paddr);
		}
		pgtable_flush(sent - SECT_PER_SPSECT, sent);
	}

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr,
		       size_t size, atomic_t *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		atomic_dec(pgcnt);
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				clear_lv2_page_table(pent - i, i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		atomic_sub(SPAGES_PER_LPAGE, pgcnt);
	}

	return 0;
}
*domain
, unsigned long iova
,
2059 phys_addr_t paddr
, size_t size
, int prot
)
2061 struct exynos_iommu_domain
*priv
= domain
->priv
;
2062 sysmmu_pte_t
*entry
;
2065 BUG_ON(priv
->pgtable
== NULL
);
2067 entry
= section_entry(priv
->pgtable
, iova
);
2069 if (size
>= SECT_SIZE
) {
2070 ret
= lv1set_section(priv
, entry
, paddr
, size
,
2071 &priv
->lv2entcnt
[lv1ent_offset(iova
)]);
2073 SYSMMU_EVENT_LOG_IOMMU_MAP(IOMMU_PRIV_TO_LOG(priv
),
2074 iova
, iova
+ size
, paddr
/ SPAGE_SIZE
);
2077 pent
= alloc_lv2entry(priv
, entry
, iova
,
2078 &priv
->lv2entcnt
[lv1ent_offset(iova
)]);
2080 ret
= PTR_ERR(pent
);
2082 ret
= lv2set_page(pent
, paddr
, size
,
2083 &priv
->lv2entcnt
[lv1ent_offset(iova
)]);
2085 SYSMMU_EVENT_LOG_IOMMU_MAP(IOMMU_PRIV_TO_LOG(priv
),
2086 iova
, iova
+ size
, paddr
/ SPAGE_SIZE
);
2091 pr_err("%s: Failed(%d) to map %#zx bytes @ %pa\n",
2092 __func__
, ret
, size
, &iova
);
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
					      dma_addr_t iova)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, false);

	spin_unlock_irqrestore(&priv->lock, flags);
}
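
/*
 * Unmapping mirrors the mapping dispatch above and returns the size that
 * was actually unmapped. When a second-level table becomes entirely
 * unused (lv2entcnt back at NUM_LV2ENTRIES), the table itself is freed
 * back to the kmem cache under pgtablelock.
 */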
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	size_t err_pgsize;
	sysmmu_pte_t *sent, *pent;
	atomic_t *lv2entcnt = &priv->lv2entcnt[lv1ent_offset(iova)];

	BUG_ON(priv->pgtable == NULL);

	sent = section_entry(priv->pgtable, iova);

	if (lv1ent_spsection(sent)) {
		if (WARN_ON(size < SPSECT_SIZE)) {
			err_pgsize = SPSECT_SIZE;
			goto err;
		}

		clear_lv1_page_table(sent, SECT_PER_SPSECT);

		pgtable_flush(sent, sent + SECT_PER_SPSECT);
		size = SPSECT_SIZE;
		goto done;
	}

	if (lv1ent_dsection(sent)) {
		if (WARN_ON(size < DSECT_SIZE)) {
			err_pgsize = DSECT_SIZE;
			goto err;
		}

		clear_lv1_page_table(sent, SECT_PER_DSECT);
		pgtable_flush(sent, sent + 2);
		size = DSECT_SIZE;
		goto done;
	}

	if (lv1ent_section(sent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		*sent = 0;
		pgtable_flush(sent, sent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(sent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	pent = page_entry(sent, iova);

	if (unlikely(lv2ent_fault(pent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(pent)) {
		*pent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(pent, pent + 1);
		atomic_inc(lv2entcnt);
	} else {
		/* lv1ent_large(ent) == true here */
		if (WARN_ON(size < LPAGE_SIZE)) {
			err_pgsize = LPAGE_SIZE;
			goto err;
		}

		clear_lv2_page_table(pent, SPAGES_PER_LPAGE);
		pgtable_flush(pent, pent + SPAGES_PER_LPAGE);
		size = LPAGE_SIZE;
		atomic_add(SPAGES_PER_LPAGE, lv2entcnt);
	}

	if (atomic_read(lv2entcnt) == NUM_LV2ENTRIES) {
		unsigned long flags;

		spin_lock_irqsave(&priv->pgtablelock, flags);
		if (atomic_read(lv2entcnt) == NUM_LV2ENTRIES) {
			kmem_cache_free(lv2table_kmem_cache,
					page_entry(sent, 0));
			atomic_set(lv2entcnt, 0);
			*sent = 0;

			SYSMMU_EVENT_LOG_IOMMU_FREESLPD(IOMMU_PRIV_TO_LOG(priv),
					iova_from_sent(priv->pgtable, sent));
		}
		spin_unlock_irqrestore(&priv->pgtablelock, flags);
	}

done:
	SYSMMU_EVENT_LOG_IOMMU_UNMAP(IOMMU_PRIV_TO_LOG(priv),
				     iova, iova + size);

	exynos_iommu_tlb_invalidate_entry(priv, iova);

	/* TLB invalidation is performed by IOVMM */

	return size;
err:
	pr_err("%s: Failed: size(%#zx) @ %pa is smaller than page size %#zx\n",
	       __func__, size, &iova, err_pgsize);
	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	phys_addr_t phys = 0;

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_spsection(entry)) {
		phys = spsection_phys(entry) + spsection_offs(iova);
	} else if (lv1ent_dsection(entry)) {
		phys = dsection_phys(entry) + dsection_offs(iova);
	} else if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	return phys;
}
static struct iommu_ops exynos_iommu_ops = {
	.domain_init	= &exynos_iommu_domain_init,
	.domain_destroy	= &exynos_iommu_domain_destroy,
	.attach_dev	= &exynos_iommu_attach_device,
	.detach_dev	= &exynos_iommu_detach_device,
	.map		= &exynos_iommu_map,
	.unmap		= &exynos_iommu_unmap,
	.iova_to_phys	= &exynos_iommu_iova_to_phys,
	.pgsize_bitmap	= PGSIZE_BITMAP,
};
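
/*
 * The *_user_pages() helpers below mirror map/unmap for userspace buffers:
 * they walk the user VMA under mmap_sem, skip page references for
 * VM_PFNMAP mappings, and batch pgtable_flush() calls per second-level
 * table.
 */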
static int __sysmmu_unmap_user_pages(struct device *dev,
					struct mm_struct *mm,
					unsigned long vaddr,
					exynos_iova_t iova, size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iovmm *vmm = owner->vmm_data;
	struct iommu_domain *domain = vmm->domain;
	struct exynos_iommu_domain *priv = domain->priv;
	struct vm_area_struct *vma;
	unsigned long start = vaddr & PAGE_MASK;
	unsigned long end = PAGE_ALIGN(vaddr + size);
	bool is_pfnmap;
	sysmmu_pte_t *sent, *pent;
	int ret = 0;

	down_read(&mm->mmap_sem);

	BUG_ON((vaddr + size) < vaddr);
	/*
	 * Assumes that the VMA is safe.
	 * The caller must check the range of address space before calling this.
	 */
	vma = find_vma(mm, vaddr);
	if (!vma) {
		pr_err("%s: vma is null\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (vma->vm_end < (vaddr + size)) {
		pr_err("%s: vma overflow: %#lx--%#lx, vaddr: %#lx, size: %zd\n",
			__func__, vma->vm_start, vma->vm_end, vaddr, size);
		ret = -EINVAL;
		goto out;
	}

	is_pfnmap = vma->vm_flags & VM_PFNMAP;

	TRACE_LOG_DEV(dev, "%s: unmap starts @ %#zx@%#lx\n",
			__func__, size, start);

	do {
		sysmmu_pte_t *pent_first;

		sent = section_entry(priv->pgtable, iova);
		if (lv1ent_fault(sent))
			goto nomapping;

		pent = page_entry(sent, iova);
		if (lv2ent_fault(pent))
			goto nomapping;

		pent_first = pent;

		do {
			/* Drop the reference taken when the page was mapped. */
			if (!lv2ent_fault(pent) && !is_pfnmap)
				put_page(phys_to_page(spage_phys(pent)));

			*pent = 0;

			/*
			 * The last entry of an lv2 table was just cleared:
			 * flush it and move on to the next lv2 table.
			 */
			if (lv2ent_offset(iova) == NUM_LV2ENTRIES - 1) {
				pgtable_flush(pent_first, pent);
				iova += PAGE_SIZE;

				sent = section_entry(priv->pgtable, iova);
				if (lv1ent_fault(sent))
					goto nomapping;

				pent = page_entry(sent, iova);
				if (lv2ent_fault(pent))
					goto nomapping;

				pent_first = pent;
			} else {
				pent++;
				iova += PAGE_SIZE;
			}
		} while (start += PAGE_SIZE, start != end);

		if (pent_first != pent)
			pgtable_flush(pent_first, pent);
	} while (start != end);

	TRACE_LOG_DEV(dev, "%s: unmap done @ %#lx\n", __func__, start);

out:
	up_read(&mm->mmap_sem);

	return ret;

nomapping:
	pr_debug("%s: Ignoring unmapping for %#lx ~ %#lx\n",
			__func__, start, end);
	goto out;
}
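/*
 * alloc_lv2entry_fast() - atomic-context lv2 allocator: installs a new lv2
 * page table under @sent if the lv1 entry is empty, then returns the lv2
 * entry for @iova; ERR_PTR(-ENOMEM) on allocation failure and
 * ERR_PTR(-EADDRINUSE) if @sent already holds a section mapping.
 */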
static sysmmu_pte_t *alloc_lv2entry_fast(struct exynos_iommu_domain *priv,
				sysmmu_pte_t *sent, unsigned long iova)
{
	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		pgtable_flush(sent, sent + 1);
	} else if (WARN_ON(!lv1ent_page(sent))) {
		return ERR_PTR(-EADDRINUSE);
	}

	return page_entry(sent, iova);
}
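/*
 * exynos_sysmmu_map_user_pages() - mirror a user address range into the
 * SysMMU: walks the CPU page tables of @mm for @vaddr..@vaddr+@size,
 * faulting in and pinning every PTE that is absent (or read-only when
 * @write is requested), and installs a small-page mapping at the matching
 * @iova. On any failure, the range mapped so far is rolled back through
 * __sysmmu_unmap_user_pages().
 */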
int exynos_sysmmu_map_user_pages(struct device *dev,
				 struct mm_struct *mm,
				 unsigned long vaddr,
				 exynos_iova_t iova,
				 size_t size, bool write,
				 bool shareable)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iovmm *vmm = owner->vmm_data;
	struct iommu_domain *domain = vmm->domain;
	struct exynos_iommu_domain *priv = domain->priv;
	exynos_iova_t iova_start = iova;
	struct vm_area_struct *vma;
	unsigned long start, end;
	unsigned long pgd_next;
	bool is_pfnmap;
	pgd_t *pgd;
	int ret = 0;

	if (WARN_ON(size == 0))
		return 0;

	down_read(&mm->mmap_sem);

	/*
	 * Assumes that the VMA is safe.
	 * The caller must check the range of address space before calling this.
	 */
	vma = find_vma(mm, vaddr);
	if (!vma) {
		pr_err("%s: vma is null\n", __func__);
		up_read(&mm->mmap_sem);
		return -EINVAL;
	}

	if (vma->vm_end < (vaddr + size)) {
		pr_err("%s: vma overflow: %#lx--%#lx, vaddr: %#lx, size: %zd\n",
			__func__, vma->vm_start, vma->vm_end, vaddr, size);
		up_read(&mm->mmap_sem);
		return -EINVAL;
	}

	is_pfnmap = vma->vm_flags & VM_PFNMAP;

	start = vaddr & PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);

	TRACE_LOG_DEV(dev, "%s: map @ %#lx--%#lx, %zd bytes, vm_flags: %#lx\n",
			__func__, start, end, size, vma->vm_flags);

	pgd = pgd_offset(mm, start);
	do {
		unsigned long pmd_next;
		pmd_t *pmd;

		if (pgd_none_or_clear_bad(pgd)) {
			ret = -EFAULT;
			goto out;
		}

		pgd_next = pgd_addr_end(start, end);
		pmd = pmd_offset((pud_t *)pgd, start);

		do {
			pte_t *pte;
			spinlock_t *ptl;
			sysmmu_pte_t *pent, *pent_first;
			sysmmu_pte_t *sent;

			if (pmd_none(*pmd)) {
				pmd = pmd_alloc(mm, (pud_t *)pgd, start);
				if (!pmd) {
					pr_err("%s: failed to alloc pmd\n",
						__func__);
					ret = -ENOMEM;
					goto out;
				}

				if (__pte_alloc(mm, vma, pmd, start)) {
					pr_err("%s: failed to alloc pte\n",
						__func__);
					ret = -ENOMEM;
					goto out;
				}
			} else if (pmd_bad(*pmd)) {
				pr_err("%s: bad pmd value %#lx\n", __func__,
					(unsigned long)pmd_val(*pmd));
				pmd_clear_bad(pmd);
				ret = -EFAULT;
				goto out;
			}

			pmd_next = pmd_addr_end(start, pgd_next);
			pte = pte_offset_map(pmd, start);

			sent = section_entry(priv->pgtable, iova);
			pent = alloc_lv2entry_fast(priv, sent, iova);
			if (IS_ERR(pent)) {
				ret = PTR_ERR(pent); /* ENOMEM or EADDRINUSE */
				goto out;
			}

			pent_first = pent;
			ptl = pte_lockptr(mm, pmd);
			spin_lock(ptl);

			do {
				WARN_ON(!lv2ent_fault(pent));

				if (!pte_present(*pte) ||
					(write && !pte_write(*pte))) {
					if (pte_present(*pte) || pte_none(*pte)) {
						spin_unlock(ptl);
						ret = handle_pte_fault(mm,
							vma, start, pte, pmd,
							write ? FAULT_FLAG_WRITE : 0);
						if (IS_ERR_VALUE(ret)) {
							ret = -EFAULT;
							goto out;
						}
						spin_lock(ptl);
					}

					if (!pte_present(*pte) ||
						(write && !pte_write(*pte))) {
						spin_unlock(ptl);
						ret = -EFAULT;
						goto out;
					}
				}

				if (!is_pfnmap)
					get_page(pte_page(*pte));
				*pent = mk_lv2ent_spage(__pfn_to_phys(
							pte_pfn(*pte)));
				if (shareable)
					set_lv2ent_shareable(pent);

				if (lv2ent_offset(iova) == (NUM_LV2ENTRIES - 1)) {
					pgtable_flush(pent_first, pent);
					iova += PAGE_SIZE;

					sent = section_entry(priv->pgtable, iova);
					pent = alloc_lv2entry_fast(priv, sent, iova);
					if (IS_ERR(pent)) {
						ret = PTR_ERR(pent);
						spin_unlock(ptl);
						goto out;
					}
					pent_first = pent;
				} else {
					pent++;
					iova += PAGE_SIZE;
				}
			} while (pte++, start += PAGE_SIZE, start < pmd_next);

			if (pent_first != pent)
				pgtable_flush(pent_first, pent);

			spin_unlock(ptl);
		} while (pmd++, start = pmd_next, start != pgd_next);

	} while (pgd++, start = pgd_next, start != end);

out:
	up_read(&mm->mmap_sem);

	if (ret) {
		pr_debug("%s: Ignoring mapping for %#lx ~ %#lx\n",
				__func__, start, end);
		__sysmmu_unmap_user_pages(dev, mm, vaddr, iova_start,
					start - (vaddr & PAGE_MASK));
	}

	return ret;
}
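/*
 * exynos_sysmmu_unmap_user_pages() - exported wrapper around
 * __sysmmu_unmap_user_pages() that only filters out zero-sized requests.
 */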
int exynos_sysmmu_unmap_user_pages(struct device *dev,
				   struct mm_struct *mm,
				   unsigned long vaddr,
				   exynos_iova_t iova, size_t size)
{
	if (WARN_ON(size == 0))
		return 0;

	return __sysmmu_unmap_user_pages(dev, mm, vaddr, iova, size);
}
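/*
 * exynos_iommu_create_domain() - for every "samsung,exynos-iommu-bus" node,
 * create one IOVM space and attach each master listed in its
 * "domain-clients" phandles, so that all clients of that node share a
 * single I/O page table.
 */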
static int __init exynos_iommu_create_domain(void)
{
	struct device_node *domain;

	for_each_compatible_node(domain, NULL, "samsung,exynos-iommu-bus") {
		struct device_node *np;
		struct exynos_iovmm *vmm = NULL;
		int i = 0;

		while ((np = of_parse_phandle(domain, "domain-clients", i++))) {
			struct platform_device *master =
					of_find_device_by_node(np);
			struct exynos_iommu_owner *owner;
			struct exynos_iommu_domain *priv;

			if (!master) {
				pr_err("%s: master IP in '%s' not found\n",
					__func__, np->name);
				of_node_put(np);
				of_node_put(domain);
				return -ENODEV;
			}

			owner = (struct exynos_iommu_owner *)
					master->dev.archdata.iommu;
			if (!owner) {
				pr_err("%s: No System MMU attached for %s\n",
					__func__, np->name);
				of_node_put(np);
				continue;
			}

			if (!vmm) {
				vmm = exynos_create_single_iovmm(np->name);
				if (IS_ERR(vmm)) {
					pr_err("%s: Failed to create IOVM space of %s\n",
						__func__, np->name);
					of_node_put(np);
					of_node_put(domain);
					return -ENOMEM;
				}
			}

			priv = (struct exynos_iommu_domain *)vmm->domain->priv;

			owner->vmm_data = vmm;
			spin_lock(&priv->lock);
			list_add_tail(&owner->client, &priv->clients);
			spin_unlock(&priv->lock);

			dev_err(&master->dev,
				"create IOVMM device node : %s\n", np->name);
			of_node_put(np);
		}

		of_node_put(domain);
	}

	return 0;
}
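/*
 * exynos_iommu_init() - driver bootstrap: the lv2 table cache and the fault
 * page must exist before any device probes, the IOMMU ops are registered
 * with the platform bus, and the DT-described domains are created last.
 */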
static int __init exynos_iommu_init(void)
{
	struct page *page;
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
		LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		pr_err("%s: failed to allocate fault page\n", __func__);
		ret = -ENOMEM;
		goto err_fault_page;
	}
	fault_page = page_to_phys(page);

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register IOMMU ops\n", __func__);
		goto err_set_iommu;
	}

	exynos_sysmmu_debugfs_root = debugfs_create_dir("sysmmu", NULL);
	if (!exynos_sysmmu_debugfs_root)
		pr_err("%s: Failed to create debugfs entry\n", __func__);

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register System MMU driver.\n", __func__);
		goto err_driver_register;
	}

	ret = exynos_iommu_create_domain();
	if (ret && (ret != -ENOENT)) {
		pr_err("%s: Failed to create iommu domain\n", __func__);
		platform_driver_unregister(&exynos_sysmmu_driver);
		goto err_driver_register;
	}

	return 0;

err_driver_register:
	bus_set_iommu(&platform_bus_type, NULL);
err_set_iommu:
	__free_page(page);
err_fault_page:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
arch_initcall_sync(exynos_iommu_init);
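/*
 * System sleep: a SysMMU that is active (and runtime-active) when the
 * system suspends is disabled after its master has suspended, and
 * re-enabled before the master resumes.
 */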
#ifdef CONFIG_PM_SLEEP
static int sysmmu_pm_genpd_suspend(struct device *dev)
{
	struct sysmmu_list_data *list;
	int ret;

	TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));

	ret = pm_generic_suspend(dev);
	if (ret) {
		TRACE_LOG("<----- %s(%s) Failed\n", __func__, dev_name(dev));
		return ret;
	}

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
		unsigned long flags;
		TRACE_LOG("Suspending %s...\n", dev_name(drvdata->sysmmu));
		spin_lock_irqsave(&drvdata->lock, flags);
		if (!drvdata->suspended && is_sysmmu_active(drvdata) &&
			(!pm_runtime_enabled(dev) ||
				is_sysmmu_runtime_active(drvdata)))
			__sysmmu_disable_nocount(drvdata);
		drvdata->suspended = true;
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}

	TRACE_LOG("<----- %s(%s)\n", __func__, dev_name(dev));

	return 0;
}
static int sysmmu_pm_genpd_resume(struct device *dev)
{
	struct sysmmu_list_data *list;
	int ret;

	TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
		unsigned long flags;
		spin_lock_irqsave(&drvdata->lock, flags);
		if (drvdata->suspended && is_sysmmu_active(drvdata) &&
			(!pm_runtime_enabled(dev) ||
				is_sysmmu_runtime_active(drvdata)))
			__sysmmu_enable_nocount(drvdata);
		drvdata->suspended = false;
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}

	ret = pm_generic_resume(dev);

	TRACE_LOG("<----- %s(%s) OK\n", __func__, dev_name(dev));

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
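/*
 * Runtime PM: save/restore the enable state of every SysMMU attached to
 * @dev around power-domain off/on transitions.
 */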
#ifdef CONFIG_PM_RUNTIME
static void sysmmu_restore_state(struct device *dev)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *data = dev_get_drvdata(list->sysmmu);
		unsigned long flags;

		TRACE_LOG("%s(%s)\n", __func__, dev_name(data->sysmmu));

		SYSMMU_EVENT_LOG_POWERON(SYSMMU_DRVDATA_TO_LOG(data));

		spin_lock_irqsave(&data->lock, flags);
		if (get_sysmmu_runtime_active(data) && is_sysmmu_active(data))
			__sysmmu_enable_nocount(data);
		spin_unlock_irqrestore(&data->lock, flags);
	}
}
static void sysmmu_save_state(struct device *dev)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *data = dev_get_drvdata(list->sysmmu);
		unsigned long flags;

		TRACE_LOG("%s(%s)\n", __func__, dev_name(data->sysmmu));

		SYSMMU_EVENT_LOG_POWEROFF(SYSMMU_DRVDATA_TO_LOG(data));

		spin_lock_irqsave(&data->lock, flags);
		if (put_sysmmu_runtime_active(data) && is_sysmmu_active(data))
			__sysmmu_disable_nocount(data);
		spin_unlock_irqrestore(&data->lock, flags);
	}
}
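/*
 * genpd .save_state callback: resolve the runtime_suspend callback exactly
 * the way the PM core would (type, class, bus, then driver) and turn the
 * SysMMU off only once the master has suspended successfully.
 */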
static int sysmmu_pm_genpd_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);
	int ret = 0;

	TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	if (cb)
		ret = cb(dev);

	if (ret == 0)
		sysmmu_save_state(dev);

	TRACE_LOG("<----- %s(%s) (cb = %pS) %s\n", __func__, dev_name(dev),
			cb, ret ? "Failed" : "OK");

	return ret;
}
static int sysmmu_pm_genpd_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);
	int ret = 0;

	TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	sysmmu_restore_state(dev);

	if (cb)
		ret = cb(dev);

	if (ret)
		sysmmu_save_state(dev);

	TRACE_LOG("<----- %s(%s) (cb = %pS) %s\n", __func__, dev_name(dev),
			cb, ret ? "Failed" : "OK");

	return ret;
}
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_GENERIC_DOMAINS
static struct gpd_dev_ops sysmmu_devpm_ops = {
#ifdef CONFIG_PM_RUNTIME
	.save_state = &sysmmu_pm_genpd_save_state,
	.restore_state = &sysmmu_pm_genpd_restore_state,
#endif
#ifdef CONFIG_PM_SLEEP
	.suspend = &sysmmu_pm_genpd_suspend,
	.resume = &sysmmu_pm_genpd_resume,
#endif
};
static int sysmmu_hook_driver_register(struct notifier_block *nb,
					unsigned long val, void *p)
{
	struct device *dev = p;

	/*
	 * No System MMU assigned. See exynos_sysmmu_probe().
	 */
	if (dev->archdata.iommu == NULL)
		return 0;

	switch (val) {
	case BUS_NOTIFY_BIND_DRIVER:
	{
		if (dev->pm_domain) {
			int ret = pm_genpd_add_callbacks(
					dev, &sysmmu_devpm_ops, NULL);
			if (ret && (ret != -ENOSYS)) {
				dev_err(dev,
				"Failed to register 'dev_pm_ops' for iommu\n");
				return ret;
			}

			dev_info(dev, "exynos-iommu gpd_dev_ops inserted!\n");
		}
		break;
	}
	case BUS_NOTIFY_BOUND_DRIVER:
	{
		struct sysmmu_list_data *list;

		if (pm_runtime_enabled(dev) && dev->pm_domain)
			break;

		for_each_sysmmu_list(dev, list) {
			struct sysmmu_drvdata *data =
					dev_get_drvdata(list->sysmmu);
			unsigned long flags;
			spin_lock_irqsave(&data->lock, flags);
			if (is_sysmmu_active(data) &&
					get_sysmmu_runtime_active(data))
				__sysmmu_enable_nocount(data);
			pm_runtime_disable(data->sysmmu);
			spin_unlock_irqrestore(&data->lock, flags);
		}
		break;
	}
	case BUS_NOTIFY_UNBOUND_DRIVER:
	{
		struct exynos_iommu_owner *owner = dev->archdata.iommu;
		WARN_ON(!list_empty(&owner->client));
		__pm_genpd_remove_callbacks(dev, false);
		dev_info(dev, "exynos-iommu gpd_dev_ops removed!\n");
		break;
	}
	} /* switch (val) */

	return 0;
}

static struct notifier_block sysmmu_notifier = {
	.notifier_call = &sysmmu_hook_driver_register,
};

static int __init exynos_iommu_prepare(void)
{
	return bus_register_notifier(&platform_bus_type, &sysmmu_notifier);
}
subsys_initcall_sync(exynos_iommu_prepare);
#endif /* CONFIG_PM_GENERIC_DOMAINS */
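/*
 * sysmmu_fault_notifier() - adapter that forwards events from a SysMMU's
 * atomic fault notifier chain to the iommu_fault_handler_t callback the
 * owner registered.
 */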
static int sysmmu_fault_notifier(struct notifier_block *nb,
				unsigned long fault_addr, void *data)
{
	struct exynos_iommu_owner *owner = NULL;
	struct exynos_iovmm *vmm;

	owner = container_of(nb, struct exynos_iommu_owner, nb);

	if (owner && owner->fault_handler) {
		vmm = exynos_get_iovmm(owner->dev);
		if (vmm && vmm->domain)
			owner->fault_handler(vmm->domain, owner->dev,
					fault_addr, (unsigned long)data,
					owner->token);
	}

	return 0;
}
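/*
 * exynos_sysmmu_add_fault_notifier() - register @handler for faults on all
 * SysMMUs serving @dev. A hypothetical client looks like:
 *
 *	static int my_fault_handler(struct iommu_domain *dom,
 *			struct device *dev, unsigned long fault_addr,
 *			int flags, void *token)
 *	{ ... return 0; }
 *
 *	exynos_sysmmu_add_fault_notifier(dev, my_fault_handler, my_token);
 *
 * (my_fault_handler/my_token are illustrative names, not part of this
 * driver.)
 */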
int exynos_sysmmu_add_fault_notifier(struct device *dev,
		iommu_fault_handler_t handler, void *token)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	struct sysmmu_drvdata *drvdata;
	unsigned long flags;
	int ret;

	if (!has_sysmmu(dev)) {
		dev_info(dev, "%s doesn't have sysmmu\n", dev_name(dev));
		return -ENODEV;
	}

	spin_lock_irqsave(&owner->lock, flags);

	owner->fault_handler = handler;
	owner->token = token;
	owner->nb.notifier_call = sysmmu_fault_notifier;

	for_each_sysmmu_list(dev, list) {
		drvdata = dev_get_drvdata(list->sysmmu);
		ret = atomic_notifier_chain_register(
				&drvdata->fault_notifiers, &owner->nb);
		if (ret) {
			dev_err(dev,
				"Failed to register %s's fault notifier\n",
				dev_name(list->sysmmu));
			goto err;
		}
	}

	spin_unlock_irqrestore(&owner->lock, flags);

	return 0;

err:
	for_each_sysmmu_list(dev, list) {
		drvdata = dev_get_drvdata(list->sysmmu);
		atomic_notifier_chain_unregister(
				&drvdata->fault_notifiers, &owner->nb);
	}
	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}
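/*
 * Page table dump helpers for post-mortem debugging: non-empty entries are
 * printed four per line, lv2 tables indented under their lv1 entry.
 */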
static void sysmmu_dump_lv2_page_table(unsigned int lv1idx, sysmmu_pte_t *base)
{
	unsigned int i;

	for (i = 0; i < NUM_LV2ENTRIES; i += 4) {
		if (!base[i] && !base[i + 1] && !base[i + 2] && !base[i + 3])
			continue;
		pr_info("   LV2[%04d][%03d] %08x %08x %08x %08x\n",
			lv1idx, i,
			base[i], base[i + 1], base[i + 2], base[i + 3]);
	}
}
static void sysmmu_dump_page_table(sysmmu_pte_t *base)
{
	unsigned int i, j;
	phys_addr_t phys_base = virt_to_phys(base);

	pr_info("---- System MMU Page Table @ %pa ----\n", &phys_base);

	for (i = 0; i < NUM_LV1ENTRIES; i += 4) {
		if (!base[i] && !base[i + 1] && !base[i + 2] && !base[i + 3])
			continue;
		pr_info("LV1[%04d] %08x %08x %08x %08x\n",
			i, base[i], base[i + 1], base[i + 2], base[i + 3]);

		for (j = 0; j < 4; j++)
			if (lv1ent_page(&base[i + j]))
				sysmmu_dump_lv2_page_table(i + j,
						page_entry(&base[i + j], 0));
	}
}
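/*
 * exynos_sysmmu_show_status() - dump the TLB and prefetch buffer state of
 * every active SysMMU attached to @dev; the SysMMU is blocked around the
 * register dump so a consistent snapshot is captured.
 */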
void exynos_sysmmu_show_status(struct device *dev)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		if (!is_sysmmu_active(drvdata) ||
				!is_sysmmu_runtime_active(drvdata)) {
			dev_info(drvdata->sysmmu,
				"%s: System MMU is not active\n", __func__);
			continue;
		}

		pr_info("DUMPING SYSTEM MMU: %s\n", dev_name(drvdata->sysmmu));

		__master_clk_enable(drvdata);
		if (sysmmu_block(drvdata->sfrbase))
			dump_sysmmu_tlb_pb(drvdata->sfrbase);
		else
			pr_err("!!Failed to block System MMU!\n");
		sysmmu_unblock(drvdata->sfrbase);

		__master_clk_disable(drvdata);
	}
}
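/*
 * exynos_sysmmu_dump_pgtable() - dump the I/O page table of the first
 * SysMMU in @dev's mmu_list.
 */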
void exynos_sysmmu_dump_pgtable(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list = list_first_entry(&owner->mmu_list,
					struct sysmmu_list_data, node);
	struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

	sysmmu_dump_page_table(phys_to_virt(drvdata->pgtable));
}
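/*
 * Performance counters (PPC): each SysMMU exposes a few event counters
 * (TLB misses, prefetch statistics, ...) that are selected through
 * REG_PPC_EVENT_SEL() and read back through REG_PPC_PMCNT().
 */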
void dump_sysmmu_ppc_cnt(struct sysmmu_drvdata *drvdata)
{
	unsigned int cfg;
	int i;

	pr_crit("------------- System MMU PPC Status --------------\n");
	for (i = 0; i < drvdata->event_cnt; i++) {
		cfg = __raw_readl(drvdata->sfrbase +
				REG_PPC_EVENT_SEL(i));
		pr_crit("%s %s %s CNT : %d\n", dev_name(drvdata->sysmmu),
			cfg & 0x10 ? "WRITE" : "READ",
			ppc_event_name[cfg & 0x7],
			__raw_readl(drvdata->sfrbase + REG_PPC_PMCNT(i)));
	}
	pr_crit("--------------------------------------------------\n");
}
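/*
 * sysmmu_set_ppc_event() - program the next free counter to count @event.
 * The first call also starts the PMU (REG_PPC_PMNC); the counter is then
 * enabled through its bit in REG_PPC_CNTENS. The caller must hold
 * drvdata->lock and have the SysMMU blocked.
 */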
int sysmmu_set_ppc_event(struct sysmmu_drvdata *drvdata, int event)
{
	unsigned int cfg;

	if (event < 0 || event > TOTAL_ID_NUM ||
			event == READ_TLB_MISS || event == WRITE_TLB_MISS ||
			event == READ_FLPD_MISS_PREFETCH ||
			event == WRITE_FLPD_MISS_PREFETCH)
		return -EINVAL;

	if (!drvdata->event_cnt)
		__raw_writel(0x1, drvdata->sfrbase + REG_PPC_PMNC);

	__raw_writel(event, drvdata->sfrbase +
			REG_PPC_EVENT_SEL(drvdata->event_cnt));
	cfg = __raw_readl(drvdata->sfrbase +
			REG_PPC_CNTENS);
	__raw_writel(cfg | 0x1 << drvdata->event_cnt,
			drvdata->sfrbase + REG_PPC_CNTENS);

	return 0;
}
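/*
 * The exynos_sysmmu_{show,clear,set}_ppc_event() wrappers below take the
 * per-SysMMU lock, verify the SysMMU is active, and block it around the
 * counter access, so they are safe to call from device drivers.
 */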
void exynos_sysmmu_show_ppc_event(struct device *dev)
{
	struct sysmmu_list_data *list;
	unsigned long flags;

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);
		if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
			dev_info(drvdata->sysmmu,
				"%s: System MMU is not active\n", __func__);
			spin_unlock_irqrestore(&drvdata->lock, flags);
			continue;
		}

		__master_clk_enable(drvdata);
		if (sysmmu_block(drvdata->sfrbase))
			dump_sysmmu_ppc_cnt(drvdata);
		else
			pr_err("!!Failed to block System MMU!\n");
		sysmmu_unblock(drvdata->sfrbase);
		__master_clk_disable(drvdata);
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}
void exynos_sysmmu_clear_ppc_event(struct device *dev)
{
	struct sysmmu_list_data *list;
	unsigned long flags;

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);
		if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
			dev_info(drvdata->sysmmu,
				"%s: System MMU is not active\n", __func__);
			spin_unlock_irqrestore(&drvdata->lock, flags);
			continue;
		}

		__master_clk_enable(drvdata);
		if (sysmmu_block(drvdata->sfrbase)) {
			dump_sysmmu_ppc_cnt(drvdata);
			__raw_writel(0x2, drvdata->sfrbase + REG_PPC_PMNC);
			__raw_writel(0, drvdata->sfrbase + REG_PPC_CNTENS);
			__raw_writel(0, drvdata->sfrbase + REG_PPC_INTENS);
			drvdata->event_cnt = 0;
		} else
			pr_err("!!Failed to block System MMU!\n");
		sysmmu_unblock(drvdata->sfrbase);
		__master_clk_disable(drvdata);

		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}
int exynos_sysmmu_set_ppc_event(struct device *dev, int event)
{
	struct sysmmu_list_data *list;
	unsigned long flags;
	int ret = 0;

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);
		if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
			dev_info(drvdata->sysmmu,
				"%s: System MMU is not active\n", __func__);
			spin_unlock_irqrestore(&drvdata->lock, flags);
			continue;
		}

		__master_clk_enable(drvdata);
		if (sysmmu_block(drvdata->sfrbase)) {
			if (drvdata->event_cnt < MAX_NUM_PPC) {
				ret = sysmmu_set_ppc_event(drvdata, event);
				if (ret)
					pr_err("Not supported Event ID (%d)",
						event);
				else
					drvdata->event_cnt++;
			}
		} else
			pr_err("!!Failed to block System MMU!\n");
		sysmmu_unblock(drvdata->sfrbase);
		__master_clk_disable(drvdata);

		spin_unlock_irqrestore(&drvdata->lock, flags);