drivers/iommu/exynos-iommu-v6.c (LineageOS/android_kernel_samsung_universal7580)
1 #include <linux/kernel.h>
2 #include <linux/io.h>
3 #include <linux/interrupt.h>
4 #include <linux/slab.h>
5 #include <linux/pm_runtime.h>
6 #include <linux/err.h>
7 #include <linux/mm.h>
8 #include <linux/errno.h>
9 #include <linux/memblock.h>
10 #include <linux/export.h>
11 #include <linux/string.h>
12 #include <linux/of.h>
13 #include <linux/of_platform.h>
14 #include <linux/device.h>
15 #include <linux/clk.h>
16 #include <linux/clk-private.h>
17 #include <linux/pm_domain.h>
18 #include <linux/sched.h>
19 #include <linux/debugfs.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/delay.h>
22
23 #include <asm/cacheflush.h>
24 #include <asm/pgtable.h>
25
26 #include <dt-bindings/sysmmu/sysmmu.h>
27
28 #include "exynos-iommu.h"
29
30 #define CFG_MASK 0x01101FBC /* Selecting bit 24, 20, 12-7, 5-2 */
31
32 #define PB_INFO_NUM(reg) ((reg) & 0xFF)
33 #define PB_GRP_NUM(reg) ((reg) >> 20)
34 #define L1TLB_ATTR_IM (1 << 16)
35
36 #define REG_PT_BASE_PPN 0x00C
37 #define REG_MMU_FLUSH 0x010
38 #define REG_MMU_FLUSH_ENTRY 0x014
39 #define REG_MMU_FLUSH_RANGE 0x018
40 #define REG_FLUSH_RANGE_START 0x020
41 #define REG_FLUSH_RANGE_END 0x024
42 #define REG_MMU_CAPA 0x030
43 #define REG_MMU_CAPA_1 0x038
44 #define REG_INT_STATUS 0x060
45 #define REG_INT_CLEAR 0x064
46 #define REG_FAULT_AR_ADDR 0x070
47 #define REG_FAULT_AR_TRANS_INFO 0x078
48 #define REG_FAULT_AW_ADDR 0x080
49 #define REG_FAULT_AW_TRANS_INFO 0x088
50 #define REG_L1TLB_CFG 0x100 /* sysmmu v5.1 only */
51 #define REG_L1TLB_CTRL 0x108 /* sysmmu v5.1 only */
52 #define REG_L2TLB_CFG 0x200 /* sysmmu that has L2TLB only*/
53 #define REG_PB_LMM 0x300
54 #define REG_PB_GRP_STATE 0x304
55 #define REG_PB_INDICATE 0x308
56 #define REG_PB_CFG 0x310
57 #define REG_PB_START_ADDR 0x320
58 #define REG_PB_END_ADDR 0x328
59 #define REG_PB_AXI_ID 0x330
60 #define REG_PB_INFO 0x350
61 #define REG_SW_DF_VPN 0x400 /* sysmmu v5.1 only */
62 #define REG_SW_DF_VPN_CMD_NUM 0x408 /* sysmmu v5.1 only */
63 #define REG_L1TLB_READ_ENTRY 0x750
64 #define REG_L1TLB_ENTRY_VPN 0x754
65 #define REG_L1TLB_ENTRY_PPN 0x75C
66 #define REG_L1TLB_ENTRY_ATTR 0x764
67 #define REG_L2TLB_READ_ENTRY 0x770
68 #define REG_L2TLB_ENTRY_VPN 0x774
69 #define REG_L2TLB_ENTRY_PPN 0x77C
70 #define REG_L2TLB_ENTRY_ATTR 0x784
71 #define REG_PCI_SPB0_SVPN 0x7A0
72 #define REG_PCI_SPB0_EVPN 0x7A4
73 #define REG_PCI_SPB0_SLOT_VALID 0x7A8
74 #define REG_PCI_SPB1_SVPN 0x7B0
75 #define REG_PCI_SPB1_EVPN 0x7B4
76 #define REG_PCI_SPB1_SLOT_VALID 0x7B8
77
78 /* 'reg' argument must be the value of REG_MMU_CAPA register */
79 #define MMU_NUM_L1TLB_ENTRIES(reg) ((reg) & 0xFF)
80 #define MMU_HAVE_PB(reg) (!!(((reg) >> 20) & 0xF))
81 #define MMU_PB_GRP_NUM(reg) (((reg) >> 20) & 0xF)
82 #define MMU_HAVE_L2TLB(reg) (!!(((reg) >> 8) & 0xF))
83
84 #define MMU_MAX_DF_CMD 8
85 #define MAX_NUM_PPC 4
86
87 #define SYSMMU_FAULTS_NUM (SYSMMU_FAULT_UNKNOWN + 1)
88
89 const char *ppc_event_name[] = {
90 "TOTAL",
91 "L1TLB MISS",
92 "L2TLB MISS",
93 "FLPD CACHE MISS",
94 "PB LOOK-UP",
95 "PB MISS",
96 "BLOCK NUM BY PREFETCHING",
97 "BLOCK CYCLE BY PREFETCHING",
98 "TLB MISS",
99 "FLPD MISS ON PREFETCHING",
100 };
101
102 static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
103 "PTW ACCESS FAULT",
104 "PAGE FAULT",
105 "L1TLB MULTI-HIT FAULT",
106 "ACCESS FAULT",
107 "SECURITY FAULT",
108 "UNKNOWN FAULT"
109 };
110
111 static char *sysmmu_clock_names[SYSMMU_CLK_NUM] = {"aclk", "pclk", "master"};
112
113 static const char * const sysmmu_prop_opts[] = {
114 [SYSMMU_PROP_RESERVED] = "Reserved",
115 [SYSMMU_PROP_READ] = "r",
116 [SYSMMU_PROP_WRITE] = "w",
117 [SYSMMU_PROP_READWRITE] = "rw", /* default */
118 };
119
120 static int iova_from_sent(sysmmu_pte_t *base, sysmmu_pte_t *sent)
121 {
122 return ((unsigned long)sent - (unsigned long)base) *
123 (SECT_SIZE / sizeof(sysmmu_pte_t));
124 }
125
126 struct sysmmu_list_data {
127 struct device *sysmmu;
128 struct list_head node; /* entry of exynos_iommu_owner.mmu_list */
129 };
130
131 #define has_sysmmu(dev) (dev->archdata.iommu != NULL)
132 #define for_each_sysmmu_list(dev, sysmmu_list) \
133 list_for_each_entry(sysmmu_list, \
134 &((struct exynos_iommu_owner *)dev->archdata.iommu)->mmu_list,\
135 node)
136
137 static struct exynos_iommu_owner *sysmmu_owner_list = NULL;
138 static struct sysmmu_drvdata *sysmmu_drvdata_list = NULL;
139
140 static struct kmem_cache *lv2table_kmem_cache;
141 static phys_addr_t fault_page;
142 static struct dentry *exynos_sysmmu_debugfs_root;
143
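/*
 * pgtable_flush() makes CPU writes to the page tables visible to the System
 * MMU's page-table walker: the ARM build flushes the data cache lines and the
 * outer (L2) cache for the given range, while the ARM64 build relies on the
 * streaming DMA API to clean the range to the point of coherency.
 */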
144 #ifdef CONFIG_ARM
145 static inline void pgtable_flush(void *vastart, void *vaend)
146 {
147 dmac_flush_range(vastart, vaend);
148 outer_flush_range(virt_to_phys(vastart),
149 virt_to_phys(vaend));
150 }
151 #else /* ARM64 */
152 static inline void pgtable_flush(void *vastart, void *vaend)
153 {
154 dma_sync_single_for_device(NULL,
155 virt_to_phys(vastart),
156 (size_t)(virt_to_phys(vaend) - virt_to_phys(vastart)),
157 DMA_TO_DEVICE);
158 }
159 #endif
160
161 static bool has_sysmmu_capable_pbuf(void __iomem *sfrbase)
162 {
163 unsigned long cfg = __raw_readl(sfrbase + REG_MMU_CAPA);
164
165 return MMU_HAVE_PB(cfg);
166 }
167
168
169 void __sysmmu_tlb_invalidate_flpdcache(void __iomem *sfrbase, dma_addr_t iova)
170 {
171 if (has_sysmmu_capable_pbuf(sfrbase))
172 writel(iova | 0x1, sfrbase + REG_MMU_FLUSH_ENTRY);
173 }
174
175 void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, dma_addr_t iova)
176 {
177 writel(iova | 0x1, sfrbase + REG_MMU_FLUSH_ENTRY);
178 }
179
180 static void __sysmmu_tlb_invalidate_all(void __iomem *sfrbase)
181 {
182 writel(0x1, sfrbase + REG_MMU_FLUSH);
183 }
184
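/*
 * TLB invalidation for an IOVA range: System MMU v5.1 and later can flush a
 * range directly through REG_FLUSH_RANGE_START/END and REG_MMU_FLUSH_RANGE;
 * older revisions fall back to a full TLB flush performed while the MMU is
 * blocked.
 */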
185 void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *drvdata,
186 dma_addr_t iova, size_t size)
187 {
188 void __iomem *sfrbase = drvdata->sfrbase;
189
190 if (__raw_sysmmu_version(sfrbase) >= MAKE_MMU_VER(5, 1)) {
191 __raw_writel(iova, sfrbase + REG_FLUSH_RANGE_START);
192 __raw_writel(size - 1 + iova, sfrbase + REG_FLUSH_RANGE_END);
193 writel(0x1, sfrbase + REG_MMU_FLUSH_RANGE);
194 SYSMMU_EVENT_LOG_TLB_INV_RANGE(SYSMMU_DRVDATA_TO_LOG(drvdata),
195 iova, iova + size);
196 } else {
197 if (sysmmu_block(sfrbase)) {
198 __sysmmu_tlb_invalidate_all(sfrbase);
199 SYSMMU_EVENT_LOG_TLB_INV_ALL(
200 SYSMMU_DRVDATA_TO_LOG(drvdata));
201 }
202 sysmmu_unblock(sfrbase);
203 }
204 }
205
206 void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pfn_pgtable)
207 {
208 __raw_writel(pfn_pgtable, sfrbase + REG_PT_BASE_PPN);
209
210 __sysmmu_tlb_invalidate_all(sfrbase);
211 }
212
213 static void __sysmmu_disable_pbuf(struct sysmmu_drvdata *drvdata,
214 int target_grp)
215 {
216 unsigned int i, num_pb;
217 void __iomem *sfrbase = drvdata->sfrbase;
218
219 if (target_grp >= 0)
220 __raw_writel(target_grp << 8, sfrbase + REG_PB_INDICATE);
221
222 __raw_writel(0, sfrbase + REG_PB_LMM);
223
224 SYSMMU_EVENT_LOG_PBLMM(SYSMMU_DRVDATA_TO_LOG(drvdata), 0, 0);
225
226 num_pb = PB_INFO_NUM(__raw_readl(sfrbase + REG_PB_INFO));
227 for (i = 0; i < num_pb; i++) {
228 __raw_writel((target_grp << 8) | i, sfrbase + REG_PB_INDICATE);
229 __raw_writel(0, sfrbase + REG_PB_CFG);
230 SYSMMU_EVENT_LOG_PBSET(SYSMMU_DRVDATA_TO_LOG(drvdata), 0, 0, 0);
231 }
232 }
233
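/*
 * The LMM presets (the expansion of "LMM" is not documented in this file) map
 * the number of physical prefetch buffers and the number of requested buffers
 * to the value programmed into REG_PB_LMM.  Entries of -1 mark unsupported
 * combinations, e.g. the unlabeled row for five prefetch buffers, and hit the
 * BUG_ON() below.
 */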
234 static unsigned int find_lmm_preset(unsigned int num_pb, unsigned int num_bufs)
235 {
236 static char lmm_preset[4][6] = { /* [num of PB][num of buffers] */
237 /* 1, 2, 3, 4, 5, 6 */
238 { 1, 1, 0, -1, -1, -1}, /* num of pb: 3 */
239 { 3, 2, 1, 0, -1, -1}, /* num of pb: 4 */
240 {-1, -1, -1, -1, -1, -1},
241 { 5, 5, 4, 2, 1, 0}, /* num of pb: 6 */
242 };
243 unsigned int lmm;
244
245 BUG_ON(num_bufs > 6);
246 lmm = lmm_preset[num_pb - 3][num_bufs - 1];
247 BUG_ON(lmm == -1);
248 return lmm;
249 }
250
251 static unsigned int find_num_pb(unsigned int num_pb, unsigned int lmm)
252 {
253 static char lmm_preset[6][6] = { /* [pb_num - 1][pb_lmm] */
254 {0, 0, 0, 0, 0, 0},
255 {0, 0, 0, 0, 0, 0},
256 {3, 2, 0, 0, 0, 0},
257 {4, 3, 2, 1, 0, 0},
258 {0, 0, 0, 0, 0, 0},
259 {6, 5, 4, 3, 3, 2},
260 };
261
262 num_pb = lmm_preset[num_pb - 1][lmm];
263 BUG_ON(num_pb == 0);
264 return num_pb;
265 }
266
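/*
 * Programs one prefetch buffer group: select the group through
 * REG_PB_INDICATE, write the LMM preset, then configure each buffer's start
 * address, end address and configuration.  Buffers beyond num_bufs, or with a
 * zero size, are left disabled.
 */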
267 static void __sysmmu_set_pbuf(struct sysmmu_drvdata *drvdata, int target_grp,
268 struct sysmmu_prefbuf prefbuf[], int num_bufs)
269 {
270 unsigned int i, num_pb, lmm;
271
272 __raw_writel(target_grp << 8, drvdata->sfrbase + REG_PB_INDICATE);
273
274 num_pb = PB_INFO_NUM(__raw_readl(drvdata->sfrbase + REG_PB_INFO));
275
276 lmm = find_lmm_preset(num_pb, (unsigned int)num_bufs);
277 num_pb = find_num_pb(num_pb, lmm);
278
279 __raw_writel(lmm, drvdata->sfrbase + REG_PB_LMM);
280
281 SYSMMU_EVENT_LOG_PBLMM(SYSMMU_DRVDATA_TO_LOG(drvdata), lmm, num_bufs);
282
283 for (i = 0; i < num_pb; i++) {
284 __raw_writel(target_grp << 8 | i,
285 drvdata->sfrbase + REG_PB_INDICATE);
286 __raw_writel(0, drvdata->sfrbase + REG_PB_CFG);
287 if ((prefbuf[i].size > 0) && (i < num_bufs)) {
288 __raw_writel(prefbuf[i].base,
289 drvdata->sfrbase + REG_PB_START_ADDR);
290 __raw_writel(prefbuf[i].size - 1 + prefbuf[i].base,
291 drvdata->sfrbase + REG_PB_END_ADDR);
292 __raw_writel(prefbuf[i].config | 1,
293 drvdata->sfrbase + REG_PB_CFG);
294
295 SYSMMU_EVENT_LOG_PBSET(SYSMMU_DRVDATA_TO_LOG(drvdata),
296 prefbuf[i].config | 1, prefbuf[i].base,
297 prefbuf[i].size - 1 + prefbuf[i].base);
298 } else {
299 if (i < num_bufs)
300 dev_err(drvdata->sysmmu,
301 "%s: Trying to init PB[%d/%d] with zero-size\n",
302 __func__, i, num_bufs);
303 SYSMMU_EVENT_LOG_PBSET(SYSMMU_DRVDATA_TO_LOG(drvdata),
304 0, 0, 0);
305 }
306 }
307 }
308
309 static void __sysmmu_set_pbuf_axi_id(struct sysmmu_drvdata *drvdata,
310 struct pb_info *pb, unsigned int ipoption[],
311 unsigned int opoption[])
312 {
313 int i, j, num_pb, lmm;
314 int ret_num_pb = 0;
315 int total_plane_num = pb->ar_id_num + pb->aw_id_num;
316 u32 opt;
317
318 if (total_plane_num <= 0)
319 return;
320
321 if (pb->grp_num < 0) {
322 pr_err("The group number(%d) is invalid\n", pb->grp_num);
323 return;
324 }
325
326 __raw_writel(pb->grp_num << 8, drvdata->sfrbase + REG_PB_INDICATE);
327
328 num_pb = PB_INFO_NUM(__raw_readl(drvdata->sfrbase + REG_PB_INFO));
329
330 lmm = find_lmm_preset(num_pb, total_plane_num);
331 num_pb = find_num_pb(num_pb, lmm);
332
333 __raw_writel(lmm, drvdata->sfrbase + REG_PB_LMM);
334
335 ret_num_pb = min(pb->ar_id_num, num_pb);
336 for (i = 0; i < ret_num_pb; i++) {
337 __raw_writel((pb->grp_num << 8) | i,
338 drvdata->sfrbase + REG_PB_INDICATE);
339 __raw_writel(0, drvdata->sfrbase + REG_PB_CFG);
340 __raw_writel((0xFFFF << 16) | pb->ar_axi_id[i],
341 drvdata->sfrbase + REG_PB_AXI_ID);
342 opt = ipoption ? ipoption[i] : SYSMMU_PBUFCFG_DEFAULT_INPUT;
343 __raw_writel(opt | 0x100001,
344 drvdata->sfrbase + REG_PB_CFG);
345 }
346
347 if (num_pb > ret_num_pb) {
348 for (i = ret_num_pb, j = 0; i < num_pb; i++, j++) {
349 __raw_writel((pb->grp_num << 8) | i,
350 drvdata->sfrbase + REG_PB_INDICATE);
351 __raw_writel(0, drvdata->sfrbase + REG_PB_CFG);
352 __raw_writel((0xFFFF << 16) | pb->aw_axi_id[j],
353 drvdata->sfrbase + REG_PB_AXI_ID);
354 opt = opoption ? opoption[i] : SYSMMU_PBUFCFG_DEFAULT_OUTPUT;
355 __raw_writel(opt | 0x100001,
356 drvdata->sfrbase + REG_PB_CFG);
357 }
358 }
359 }
360
361 static void __sysmmu_set_pbuf_property(struct sysmmu_drvdata *drvdata,
362 struct pb_info *pb, unsigned int ipoption[],
363 unsigned int opoption[])
364 {
365 int i, num_pb, lmm;
366 int ret_num_pb = 0;
367 int total_plane_num = pb->ar_id_num + pb->aw_id_num;
368 u32 opt;
369
370 if (total_plane_num <= 0)
371 return;
372
373 if (pb->grp_num < 0) {
374 pr_err("The group number(%d) is invalid\n", pb->grp_num);
375 return;
376 }
377
378 num_pb = PB_INFO_NUM(__raw_readl(drvdata->sfrbase + REG_PB_INFO));
379 lmm = find_lmm_preset(num_pb, total_plane_num);
380 num_pb = find_num_pb(num_pb, lmm);
381
382 ret_num_pb = min(pb->ar_id_num, num_pb);
383 for (i = 0; i < ret_num_pb; i++) {
384 __raw_writel((pb->grp_num << 8) | i,
385 drvdata->sfrbase + REG_PB_INDICATE);
386 opt = ipoption ? ipoption[i] : SYSMMU_PBUFCFG_DEFAULT_INPUT;
387 __raw_writel(opt | 0x100001,
388 drvdata->sfrbase + REG_PB_CFG);
389 }
390
391 if (num_pb > ret_num_pb) {
392 for (i = ret_num_pb; i < num_pb; i++) {
393 __raw_writel((pb->grp_num << 8) | i,
394 drvdata->sfrbase + REG_PB_INDICATE);
395 opt = opoption ? opoption[i] : SYSMMU_PBUFCFG_DEFAULT_OUTPUT;
396 __raw_writel(opt | 0x100001,
397 drvdata->sfrbase + REG_PB_CFG);
398 }
399 }
400 }
401
402 static void __exynos_sysmmu_set_prefbuf_by_region(
403 struct sysmmu_drvdata *drvdata, struct device *dev,
404 struct sysmmu_prefbuf pb_reg[], unsigned int num_reg)
405 {
406 struct pb_info *pb;
407 unsigned int i;
408 int orig_num_reg, num_bufs = 0;
409 struct sysmmu_prefbuf prefbuf[6];
410
411 if (!has_sysmmu_capable_pbuf(drvdata->sfrbase))
412 return;
413
414 if ((num_reg == 0) || (pb_reg == NULL)) {
415 /* Disabling prefetch buffers */
416 __sysmmu_disable_pbuf(drvdata, -1);
417 return;
418 }
419
420 orig_num_reg = num_reg;
421
422 list_for_each_entry(pb, &drvdata->pb_grp_list, node) {
423 if (pb->master == dev) {
424 for (i = 0; i < orig_num_reg; i++) {
425 if (((pb_reg[i].config & SYSMMU_PBUFCFG_WRITE) &&
426 (pb->prop & SYSMMU_PROP_WRITE)) ||
427 (!(pb_reg[i].config & SYSMMU_PBUFCFG_WRITE) &&
428 (pb->prop & SYSMMU_PROP_READ))) {
429 if (num_reg > 0)
430 num_reg--;
431 else if (num_reg == 0)
432 break;
433
434 prefbuf[num_bufs++] = pb_reg[i];
435 }
436 }
437 if (num_bufs)
438 __sysmmu_set_pbuf(drvdata, pb->grp_num, prefbuf,
439 num_bufs);
440 num_bufs = 0;
441 }
442 }
443 }
444
445 static void __exynos_sysmmu_set_prefbuf_axi_id(struct sysmmu_drvdata *drvdata,
446 struct device *master, unsigned int inplanes,
447 unsigned int onplanes, unsigned int ipoption[],
448 unsigned int opoption[])
449 {
450 struct pb_info *pb;
451
452 if (!has_sysmmu_capable_pbuf(drvdata->sfrbase))
453 return;
454
455 list_for_each_entry(pb, &drvdata->pb_grp_list, node) {
456 if (master) {
457 if (pb->master == master) {
458 struct pb_info tpb;
459 memcpy(&tpb, pb, sizeof(tpb));
460 tpb.ar_id_num = inplanes;
461 tpb.aw_id_num = onplanes;
462 __sysmmu_set_pbuf_axi_id(drvdata, &tpb,
463 ipoption, opoption);
464 }
465 continue;
466 }
467 __sysmmu_set_pbuf_axi_id(drvdata, pb,
468 ipoption, opoption);
469 }
470 }
471
472 static void __exynos_sysmmu_set_prefbuf_property(struct sysmmu_drvdata *drvdata,
473 struct device *master, unsigned int inplanes,
474 unsigned int onplanes, unsigned int ipoption[],
475 unsigned int opoption[])
476 {
477 struct pb_info *pb;
478
479 if (!has_sysmmu_capable_pbuf(drvdata->sfrbase))
480 return;
481
482 if (!master)
483 return;
484
485 list_for_each_entry(pb, &drvdata->pb_grp_list, node) {
486 if (pb->master == master) {
487 struct pb_info tpb;
488 memcpy(&tpb, pb, sizeof(tpb));
489 tpb.ar_id_num = inplanes;
490 tpb.aw_id_num = onplanes;
491 __sysmmu_set_pbuf_property(drvdata, &tpb,
492 ipoption, opoption);
493 }
494 }
495 }
496
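/*
 * SW direct fetch (DF): writing a virtual page number to REG_SW_DF_VPN asks
 * the System MMU to fetch (and, judging by the L1TLB_ATTR_IM check below,
 * pin) the translation in the L1 TLB ahead of time.  Only available on
 * System MMU v5.1 and later.
 */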
497 static void __sysmmu_set_df(void __iomem *sfrbase,
498 dma_addr_t iova)
499 {
500 __raw_writel(iova, sfrbase + REG_SW_DF_VPN);
501 }
502
503 void __exynos_sysmmu_set_df(struct sysmmu_drvdata *drvdata, dma_addr_t iova)
504 {
505 #ifdef CONFIG_EXYNOS7_IOMMU_CHECK_DF
506 int i, num_l1tlb, df_cnt = 0;
507 u32 cfg;
508 #endif
509
510 if (MAKE_MMU_VER(5, 1) > __raw_sysmmu_version(drvdata->sfrbase)) {
511 dev_err(drvdata->sysmmu, "%s: SW direct fetch not supported\n",
512 __func__);
513 return;
514 }
515
516 #ifdef CONFIG_EXYNOS7_IOMMU_CHECK_DF
517 num_l1tlb = MMU_NUM_L1TLB_ENTRIES(__raw_readl(drvdata->sfrbase +
518 REG_MMU_CAPA));
519 for (i = 0; i < num_l1tlb; i++) {
520 __raw_writel(i, drvdata->sfrbase + REG_L1TLB_READ_ENTRY);
521 cfg = __raw_readl(drvdata->sfrbase + REG_L1TLB_ENTRY_ATTR);
522 if (cfg & L1TLB_ATTR_IM)
523 df_cnt++;
524 }
525
526 if (df_cnt == num_l1tlb) {
527 dev_err(drvdata->sysmmu,
528 "%s: All TLBs are special slots", __func__);
529 return;
530 }
531
532 cfg = __raw_readl(drvdata->sfrbase + REG_SW_DF_VPN_CMD_NUM);
533
534 if ((cfg & 0xFF) > 9) {
535 dev_info(drvdata->sysmmu,
536 "%s: DF command queue is full\n", __func__);
537 } else {
538 #endif
539 __sysmmu_set_df(drvdata->sfrbase, iova);
540 SYSMMU_EVENT_LOG_DF(SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
#ifdef CONFIG_EXYNOS7_IOMMU_CHECK_DF
541 }
#endif
542 }
543
544 void __exynos_sysmmu_release_df(struct sysmmu_drvdata *drvdata)
545 {
546 if (__raw_sysmmu_version(drvdata->sfrbase) >= MAKE_MMU_VER(5, 1)) {
547 __raw_writel(0x1, drvdata->sfrbase + REG_L1TLB_CTRL);
548 SYSMMU_EVENT_LOG_DF_UNLOCK_ALL(SYSMMU_DRVDATA_TO_LOG(drvdata));
549 } else {
550 dev_err(drvdata->sysmmu, "DF is not supported");
551 }
552 }
553
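/*
 * Dumps the L1/L2 TLB entries and the prefetch buffer state of a System MMU
 * for fault debugging.  The kernel page table is walked first only to report
 * the physical address of the SFR region alongside its virtual address.
 */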
554 void dump_sysmmu_tlb_pb(void __iomem *sfrbase)
555 {
556 int i, j, capa, num_pb, lmm;
557 pgd_t *pgd;
558 pud_t *pud;
559 pmd_t *pmd;
560 pte_t *pte;
561 phys_addr_t phys;
562
563 pgd = pgd_offset_k((unsigned long)sfrbase);
564 if (!pgd) {
565 pr_crit("Invalid virtual address %p\n", sfrbase);
566 return;
567 }
568
569 pud = pud_offset(pgd, (unsigned long)sfrbase);
570 if (!pud) {
571 pr_crit("Invalid virtual address %p\n", sfrbase);
572 return;
573 }
574
575 pmd = pmd_offset(pud, (unsigned long)sfrbase);
576 if (!pmd) {
577 pr_crit("Invalid virtual address %p\n", sfrbase);
578 return;
579 }
580
581 pte = pte_offset_kernel(pmd, (unsigned long)sfrbase);
582 if (!pte) {
583 pr_crit("Invalid virtual address %p\n", sfrbase);
584 return;
585 }
586
587 capa = __raw_readl(sfrbase + REG_MMU_CAPA);
588 lmm = MMU_RAW_VER(__raw_readl(sfrbase + REG_MMU_VERSION));
589
590 phys = pte_pfn(*pte) << PAGE_SHIFT;
591 pr_crit("ADDR: %pa(VA: %p), MMU_CTRL: %#010x, PT_BASE: %#010x\n",
592 &phys, sfrbase,
593 __raw_readl(sfrbase + REG_MMU_CTRL),
594 __raw_readl(sfrbase + REG_PT_BASE_PPN));
595 pr_crit("VERSION %d.%d, MMU_CFG: %#010x, MMU_STATUS: %#010x\n",
596 MMU_MAJ_VER(lmm), MMU_MIN_VER(lmm),
597 __raw_readl(sfrbase + REG_MMU_CFG),
598 __raw_readl(sfrbase + REG_MMU_STATUS));
599
600 if (MMU_HAVE_L2TLB(__raw_readl(sfrbase + REG_MMU_CAPA_1)))
601 pr_crit("Level 2 TLB: %s\n",
602 (__raw_readl(sfrbase + REG_L2TLB_CFG) == 1) ?
603 "on" : "off");
604
605 pr_crit("---------- Level 1 TLB -----------------------------------\n");
606
607 for (i = 0; i < MMU_NUM_L1TLB_ENTRIES(capa); i++) {
608 __raw_writel(i, sfrbase + REG_L1TLB_READ_ENTRY);
609 pr_crit("[%02d] VPN: %#010x, PPN: %#010x, ATTR: %#010x\n",
610 i, __raw_readl(sfrbase + REG_L1TLB_ENTRY_VPN),
611 __raw_readl(sfrbase + REG_L1TLB_ENTRY_PPN),
612 __raw_readl(sfrbase + REG_L1TLB_ENTRY_ATTR));
613 }
614
615 if (!has_sysmmu_capable_pbuf(sfrbase))
616 return;
617
618 pr_crit("---------- Prefetch Buffers ------------------------------\n");
619
620 for (i = 0; i < MMU_PB_GRP_NUM(capa); i++) {
621 __raw_writel(i << 8, sfrbase + REG_PB_INDICATE);
622
623 num_pb = PB_INFO_NUM(__raw_readl(sfrbase + REG_PB_INFO));
624 lmm = __raw_readl(sfrbase + REG_PB_LMM);
625 pr_crit("PB_INFO[%d]: %#010x, PB_LMM: %#010x\n",
626 i, num_pb, lmm);
627
628 num_pb = find_num_pb(num_pb, lmm);
629 for (j = 0; j < num_pb; j++) {
630 __raw_writel((i << 8) | j, sfrbase + REG_PB_INDICATE);
631 pr_crit("PB[%d][%d] = CFG: %#010x, AXI ID: %#010x, ", i,
632 j, __raw_readl(sfrbase + REG_PB_CFG),
633 __raw_readl(sfrbase + REG_PB_AXI_ID));
634 pr_crit("PB[%d][%d] START: %#010x, END: %#010x\n", i, j,
635 __raw_readl(sfrbase + REG_PB_START_ADDR),
636 __raw_readl(sfrbase + REG_PB_END_ADDR));
637 pr_crit("SPB0 START: %#010x, END: %#010x, VALID: %#010x\n",
638 __raw_readl(sfrbase + REG_PCI_SPB0_SVPN),
639 __raw_readl(sfrbase + REG_PCI_SPB0_EVPN),
640 __raw_readl(sfrbase + REG_PCI_SPB0_SLOT_VALID));
641 pr_crit("SPB1 START: %#010x, END: %#010x, VALID: %#010x\n",
642 __raw_readl(sfrbase + REG_PCI_SPB1_SVPN),
643 __raw_readl(sfrbase + REG_PCI_SPB1_EVPN),
644 __raw_readl(sfrbase + REG_PCI_SPB1_SLOT_VALID));
645 }
646 }
647 }
648
649 static void show_fault_information(struct sysmmu_drvdata *drvdata,
650 int flags, unsigned long fault_addr)
651 {
652 unsigned int info;
653 phys_addr_t pgtable;
654 int fault_id = SYSMMU_FAULT_ID(flags);
655
656 pgtable = __raw_readl(drvdata->sfrbase + REG_PT_BASE_PPN);
657 pgtable <<= PAGE_SHIFT;
658
659 pr_crit("----------------------------------------------------------\n");
660 pr_crit("%s %s %s at %#010lx (page table @ %pa)\n",
661 dev_name(drvdata->sysmmu),
662 (flags & IOMMU_FAULT_WRITE) ? "WRITE" : "READ",
663 sysmmu_fault_name[fault_id], fault_addr, &pgtable);
664
665 if (fault_id == SYSMMU_FAULT_UNKNOWN) {
666 pr_crit("The fault is not caused by this System MMU.\n");
667 pr_crit("Please check IRQ and SFR base address.\n");
668 goto finish;
669 }
670
671 info = __raw_readl(drvdata->sfrbase +
672 ((flags & IOMMU_FAULT_WRITE) ?
673 REG_FAULT_AW_TRANS_INFO : REG_FAULT_AR_TRANS_INFO));
674 pr_crit("AxID: %#x, AxLEN: %#x\n", info & 0xFFFF, (info >> 16) & 0xF);
675
676 if (pgtable != drvdata->pgtable)
677 pr_crit("Page table base of driver: %pa\n",
678 &drvdata->pgtable);
679
680 if (fault_id == SYSMMU_FAULT_PTW_ACCESS)
681 pr_crit("System MMU has failed to access page table\n");
682
683 if (!pfn_valid(pgtable >> PAGE_SHIFT)) {
684 pr_crit("Page table base is not in a valid memory region\n");
685 } else {
686 sysmmu_pte_t *ent;
687 ent = section_entry(phys_to_virt(pgtable), fault_addr);
688 pr_crit("Lv1 entry: %#010x\n", *ent);
689
690 if (lv1ent_page(ent)) {
691 ent = page_entry(ent, fault_addr);
692 pr_crit("Lv2 entry: %#010x\n", *ent);
693 }
694 }
695
696 dump_sysmmu_tlb_pb(drvdata->sfrbase);
697
698 finish:
699 pr_crit("----------------------------------------------------------\n");
700 }
701
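/*
 * Fault interrupt handling: bits 16 and above of REG_INT_STATUS report faults
 * on the write (AW) channel, the lower bits the read (AR) channel.  The
 * recovery path below is compiled out (#if 0), so after logging the fault and
 * notifying the registered fault notifiers the handler always panics.
 */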
702 #define REG_INT_STATUS_WRITE_BIT 16
703
704 irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
705 {
706 /* The System MMU is blocked (stalled) when a fault interrupt occurs. */
707 struct sysmmu_drvdata *drvdata = dev_id;
708 unsigned int itype;
709 unsigned long addr = -1;
710 int flags = 0;
711
712 WARN(!is_sysmmu_active(drvdata),
713 "Fault occurred while System MMU %s is not enabled!\n",
714 dev_name(drvdata->sysmmu));
715
716 itype = __ffs(__raw_readl(drvdata->sfrbase + REG_INT_STATUS));
717 if (itype >= REG_INT_STATUS_WRITE_BIT) {
718 itype -= REG_INT_STATUS_WRITE_BIT;
719 flags = IOMMU_FAULT_WRITE;
720 }
721
722 if (WARN_ON(!(itype < SYSMMU_FAULT_UNKNOWN)))
723 itype = SYSMMU_FAULT_UNKNOWN;
724 else
725 addr = __raw_readl(drvdata->sfrbase +
726 ((flags & IOMMU_FAULT_WRITE) ?
727 REG_FAULT_AW_ADDR : REG_FAULT_AR_ADDR));
728 flags |= SYSMMU_FAULT_FLAG(itype);
729
730 show_fault_information(drvdata, flags, addr);
731
732 atomic_notifier_call_chain(&drvdata->fault_notifiers, addr, &flags);
733
734 #if 0 /* Recovering System MMU fault is available from System MMU v6 */
735 if ((ret == 0) &&
736 ((itype == SYSMMU_FAULT_PAGE_FAULT) ||
737 (itype == SYSMMU_FAULT_ACCESS))) {
738 if (flags & IOMMU_FAULT_WRITE)
739 itype += REG_INT_STATUS_WRITE_BIT;
740 __raw_writel(1 << itype, drvdata->sfrbase + REG_INT_CLEAR);
741
742 sysmmu_unblock(drvdata->sfrbase);
743 } else
744 #endif
745
746 panic("Unrecoverable System MMU Fault!!");
747
748 return IRQ_HANDLED;
749 }
750
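/*
 * Initial MMU_CFG programming: block the MMU, enable the FLPD cache, enable
 * ACG unless SYSMMU_PROP_DISABLE_ACG is set, optionally override QoS, and
 * preserve the CFG bits outside CFG_MASK that are already set in the
 * register.
 */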
751 void __sysmmu_init_config(struct sysmmu_drvdata *drvdata)
752 {
753 unsigned long cfg;
754
755 __raw_writel(CTRL_BLOCK, drvdata->sfrbase + REG_MMU_CTRL);
756
757 cfg = CFG_FLPDCACHE;
758
759 if (!(drvdata->prop & SYSMMU_PROP_DISABLE_ACG))
760 cfg |= CFG_ACGEN;
761
762 if (drvdata->qos >= 0)
763 cfg |= CFG_QOS_OVRRIDE | CFG_QOS(drvdata->qos);
764
765 if (has_sysmmu_capable_pbuf(drvdata->sfrbase))
766 __exynos_sysmmu_set_prefbuf_axi_id(drvdata, NULL, 0, 0,
767 NULL, NULL);
768
769 cfg |= __raw_readl(drvdata->sfrbase + REG_MMU_CFG) & ~CFG_MASK;
770 __raw_writel(cfg, drvdata->sfrbase + REG_MMU_CFG);
771 }
772
773 void sysmmu_tlb_invalidate_flpdcache(struct device *dev, dma_addr_t iova)
774 {
775 struct sysmmu_list_data *list;
776
777 for_each_sysmmu_list(dev, list) {
778 unsigned long flags;
779 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
780
781 spin_lock_irqsave(&drvdata->lock, flags);
782 if (is_sysmmu_active(drvdata) &&
783 is_sysmmu_runtime_active(drvdata)) {
784 TRACE_LOG_DEV(drvdata->sysmmu,
785 "FLPD invalidation @ %#x\n", iova);
786 __master_clk_enable(drvdata);
787 __sysmmu_tlb_invalidate_flpdcache(
788 drvdata->sfrbase, iova);
789 SYSMMU_EVENT_LOG_FLPD_FLUSH(
790 SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
791 __master_clk_disable(drvdata);
792 } else {
793 TRACE_LOG_DEV(drvdata->sysmmu,
794 "Skip FLPD invalidation @ %#x\n", iova);
795 }
796 spin_unlock_irqrestore(&drvdata->lock, flags);
797 }
798 }
799
800 static void sysmmu_tlb_invalidate_entry(struct device *dev, dma_addr_t iova,
801 bool force)
802 {
803 struct sysmmu_list_data *list;
804
805 for_each_sysmmu_list(dev, list) {
806 unsigned long flags;
807 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
808
809 if (!force && !(drvdata->prop & SYSMMU_PROP_NONBLOCK_TLBINV))
810 continue;
811
812 spin_lock_irqsave(&drvdata->lock, flags);
813 if (is_sysmmu_active(drvdata) &&
814 is_sysmmu_runtime_active(drvdata)) {
815 TRACE_LOG_DEV(drvdata->sysmmu,
816 "TLB invalidation @ %#x\n", iova);
817 __master_clk_enable(drvdata);
818 __sysmmu_tlb_invalidate_entry(drvdata->sfrbase, iova);
819 SYSMMU_EVENT_LOG_TLB_INV_VPN(
820 SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
821 __master_clk_disable(drvdata);
822 } else {
823 TRACE_LOG_DEV(drvdata->sysmmu,
824 "Skip TLB invalidation @ %#x\n", iova);
825 }
826 spin_unlock_irqrestore(&drvdata->lock, flags);
827 }
828 }
829
830 void exynos_sysmmu_tlb_invalidate(struct iommu_domain *domain, dma_addr_t start,
831 size_t size)
832 {
833 struct exynos_iommu_domain *priv = domain->priv;
834 struct exynos_iommu_owner *owner;
835 struct sysmmu_list_data *list;
836 unsigned long flags;
837
838 spin_lock_irqsave(&priv->lock, flags);
839 list_for_each_entry(owner, &priv->clients, client) {
840 for_each_sysmmu_list(owner->dev, list) {
841 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
842
843 if (!!(drvdata->prop & SYSMMU_PROP_NONBLOCK_TLBINV))
844 continue;
845
846 spin_lock(&drvdata->lock);
847 if (!is_sysmmu_active(drvdata) ||
848 !is_sysmmu_runtime_active(drvdata)) {
849 spin_unlock(&drvdata->lock);
850 TRACE_LOG_DEV(drvdata->sysmmu,
851 "Skip TLB invalidation %#x@%#x\n", size, start);
852 continue;
853 }
854
855 TRACE_LOG_DEV(drvdata->sysmmu,
856 "TLB invalidation %#x@%#x\n", size, start);
857
858 __master_clk_enable(drvdata);
859
860 __sysmmu_tlb_invalidate(drvdata, start, size);
861
862 __master_clk_disable(drvdata);
863
864 spin_unlock(&drvdata->lock);
865 }
866 }
867 spin_unlock_irqrestore(&priv->lock, flags);
868 }
869
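/*
 * Enable/disable bookkeeping: set_sysmmu_active()/set_sysmmu_inactive()
 * reference-count nested enable requests, and the "_nocount" helpers below
 * touch the hardware only when the System MMU is also runtime active.
 */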
870 static inline void __sysmmu_disable_nocount(struct sysmmu_drvdata *drvdata)
871 {
872 int disable = (drvdata->prop & SYSMMU_PROP_STOP_BLOCK) ?
873 CTRL_BLOCK_DISABLE : CTRL_DISABLE;
874
875 __raw_sysmmu_disable(drvdata->sfrbase, disable);
876
877 __sysmmu_clk_disable(drvdata);
878 if (IS_ENABLED(CONFIG_EXYNOS_IOMMU_NO_MASTER_CLKGATE))
879 __master_clk_disable(drvdata);
880
881 SYSMMU_EVENT_LOG_DISABLE(SYSMMU_DRVDATA_TO_LOG(drvdata));
882
883 TRACE_LOG("%s(%s)\n", __func__, dev_name(drvdata->sysmmu));
884 }
885
886 static bool __sysmmu_disable(struct sysmmu_drvdata *drvdata)
887 {
888 bool disabled;
889 unsigned long flags;
890
891 spin_lock_irqsave(&drvdata->lock, flags);
892
893 disabled = set_sysmmu_inactive(drvdata);
894
895 if (disabled) {
896 drvdata->pgtable = 0;
897 drvdata->domain = NULL;
898
899 if (is_sysmmu_runtime_active(drvdata)) {
900 __master_clk_enable(drvdata);
901 __sysmmu_disable_nocount(drvdata);
902 __master_clk_disable(drvdata);
903 }
904
905 TRACE_LOG_DEV(drvdata->sysmmu, "Disabled\n");
906 } else {
907 TRACE_LOG_DEV(drvdata->sysmmu, "%d times left to disable\n",
908 drvdata->activations);
909 }
910
911 spin_unlock_irqrestore(&drvdata->lock, flags);
912
913 return disabled;
914 }
915
916 static void __sysmmu_enable_nocount(struct sysmmu_drvdata *drvdata)
917 {
918 if (IS_ENABLED(CONFIG_EXYNOS_IOMMU_NO_MASTER_CLKGATE))
919 __master_clk_enable(drvdata);
920
921 __sysmmu_clk_enable(drvdata);
922
923 __sysmmu_init_config(drvdata);
924
925 __sysmmu_set_ptbase(drvdata->sfrbase, drvdata->pgtable / PAGE_SIZE);
926
927 __raw_sysmmu_enable(drvdata->sfrbase);
928
929 SYSMMU_EVENT_LOG_ENABLE(SYSMMU_DRVDATA_TO_LOG(drvdata));
930
931 TRACE_LOG_DEV(drvdata->sysmmu, "Really enabled\n");
932 }
933
934 static int __sysmmu_enable(struct sysmmu_drvdata *drvdata,
935 phys_addr_t pgtable, struct iommu_domain *domain)
936 {
937 int ret = 0;
938 unsigned long flags;
939
940 spin_lock_irqsave(&drvdata->lock, flags);
941 if (set_sysmmu_active(drvdata)) {
942 drvdata->pgtable = pgtable;
943 drvdata->domain = domain;
944
945 if (is_sysmmu_runtime_active(drvdata)) {
946 __master_clk_enable(drvdata);
947 __sysmmu_enable_nocount(drvdata);
948 __master_clk_disable(drvdata);
949 }
950
951 TRACE_LOG_DEV(drvdata->sysmmu, "Enabled\n");
952 } else {
953 ret = (pgtable == drvdata->pgtable) ? 1 : -EBUSY;
954
955 TRACE_LOG_DEV(drvdata->sysmmu, "Already enabled (%d)\n", ret);
956 }
957
958 if (WARN_ON(ret < 0))
959 set_sysmmu_inactive(drvdata); /* decrement count */
960
961 spin_unlock_irqrestore(&drvdata->lock, flags);
962
963 return ret;
964 }
965
966 /* __exynos_sysmmu_enable: Enables System MMU
967 *
968 * returns -error if an error occurred and System MMU is not enabled,
969 * 0 if the System MMU has been just enabled and 1 if System MMU was already
970 * enabled before.
971 */
972 static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
973 struct iommu_domain *domain)
974 {
975 int ret = 0;
976 unsigned long flags;
977 struct exynos_iommu_owner *owner = dev->archdata.iommu;
978 struct sysmmu_list_data *list;
979
980 BUG_ON(!has_sysmmu(dev));
981
982 spin_lock_irqsave(&owner->lock, flags);
983
984 for_each_sysmmu_list(dev, list) {
985 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
986 ret = __sysmmu_enable(drvdata, pgtable, domain);
987 if (ret < 0) {
988 struct sysmmu_list_data *iter;
989 for_each_sysmmu_list(dev, iter) {
990 if (iter == list)
991 break;
992 __sysmmu_disable(dev_get_drvdata(iter->sysmmu));
993 }
994 break;
995 }
996 }
997
998 spin_unlock_irqrestore(&owner->lock, flags);
999
1000 return ret;
1001 }
1002
1003 int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
1004 {
1005 int ret;
1006
1007 BUG_ON(!memblock_is_memory(pgtable));
1008
1009 ret = __exynos_sysmmu_enable(dev, pgtable, NULL);
1010
1011 return ret;
1012 }
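
/*
 * Hypothetical usage sketch (not part of this driver): a master device driver
 * that maintains its own first-level page table could enable and disable its
 * System MMUs as shown below.  The 'my_drv' structure and its 'pgtable' field
 * are illustrative only.
 *
 *	ret = exynos_sysmmu_enable(dev, __pa(my_drv->pgtable));
 *	if (ret < 0)
 *		return ret;
 *	...
 *	exynos_sysmmu_disable(dev);
 */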
1013
1014 bool exynos_sysmmu_disable(struct device *dev)
1015 {
1016 unsigned long flags;
1017 bool disabled = true;
1018 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1019 struct sysmmu_list_data *list;
1020
1021 BUG_ON(!has_sysmmu(dev));
1022
1023 spin_lock_irqsave(&owner->lock, flags);
1024
1025 /* Every call to __sysmmu_disable() must return same result */
1026 for_each_sysmmu_list(dev, list) {
1027 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
1028 disabled = __sysmmu_disable(drvdata);
1029 }
1030
1031 spin_unlock_irqrestore(&owner->lock, flags);
1032
1033 return disabled;
1034 }
1035
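/*
 * Optional fault recovery (CONFIG_EXYNOS_IOMMU_RECOVER_FAULT_HANDLER): page
 * faults are "recovered" by mapping the dummy fault_page at the faulting
 * address, and L1 TLB multi-hit faults by invalidating the offending entry in
 * every attached System MMU.  Without the config option the handler simply
 * returns -ENOSYS.
 */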
1036 #ifdef CONFIG_EXYNOS_IOMMU_RECOVER_FAULT_HANDLER
1037 int recover_fault_handler (struct iommu_domain *domain,
1038 struct device *dev, unsigned long fault_addr,
1039 int itype, void *reserved)
1040 {
1041 struct exynos_iommu_domain *priv = domain->priv;
1042 struct exynos_iommu_owner *owner;
1043 unsigned long flags;
1044
1045 itype %= 16;
1046
1047 if (itype == SYSMMU_PAGEFAULT) {
1048 struct exynos_iovmm *vmm_data;
1049 sysmmu_pte_t *sent;
1050 sysmmu_pte_t *pent;
1051
1052 BUG_ON(priv->pgtable == NULL);
1053
1054 sent = section_entry(priv->pgtable, fault_addr);
1055 if (!lv1ent_page(sent)) {
1056 pent = kmem_cache_zalloc(lv2table_kmem_cache,
1057 GFP_ATOMIC);
1058 if (!pent)
1059 return -ENOMEM;
1060
1061 *sent = mk_lv1ent_page(__pa(pent));
1062 pgtable_flush(sent, sent + 1);
1063 }
1064 pent = page_entry(sent, fault_addr);
1065 if (lv2ent_fault(pent)) {
1066 *pent = mk_lv2ent_spage(fault_page);
1067 pgtable_flush(pent, pent + 1);
1068 } else {
1069 pr_err("[%s] 0x%lx by '%s' is already mapped\n",
1070 sysmmu_fault_name[itype], fault_addr,
1071 dev_name(dev));
1072 }
1073
1074 owner = dev->archdata.iommu;
1075 vmm_data = (struct exynos_iovmm *)owner->vmm_data;
1076 if (find_iovm_region(vmm_data, fault_addr)) {
1077 pr_err("[%s] 0x%lx by '%s' is remapped\n",
1078 sysmmu_fault_name[itype],
1079 fault_addr, dev_name(dev));
1080 } else {
1081 pr_err("[%s] '%s' accessed unmapped address(0x%lx)\n",
1082 sysmmu_fault_name[itype], dev_name(dev),
1083 fault_addr);
1084 }
1085 } else if (itype == SYSMMU_L1TLB_MULTIHIT) {
1086 spin_lock_irqsave(&priv->lock, flags);
1087 list_for_each_entry(owner, &priv->clients, client)
1088 sysmmu_tlb_invalidate_entry(owner->dev,
1089 (dma_addr_t)fault_addr, true);
1090 spin_unlock_irqrestore(&priv->lock, flags);
1091
1092 pr_err("[%s] occurred at 0x%lx by '%s'\n",
1093 sysmmu_fault_name[itype], fault_addr, dev_name(dev));
1094 } else {
1095 return -ENOSYS;
1096 }
1097
1098 return 0;
1099 }
1100 #else
1101 int recover_fault_handler (struct iommu_domain *domain,
1102 struct device *dev, unsigned long fault_addr,
1103 int itype, void *reserved)
1104 {
1105 return -ENOSYS;
1106 }
1107 #endif
1108
1109 void sysmmu_set_prefetch_buffer_by_region(struct device *dev,
1110 struct sysmmu_prefbuf pb_reg[], unsigned int num_reg)
1111 {
1112 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1113 struct sysmmu_list_data *list;
1114 unsigned long flags;
1115
1116 if (!dev->archdata.iommu) {
1117 dev_err(dev, "%s: No System MMU is configured\n", __func__);
1118 return;
1119 }
1120
1121 spin_lock_irqsave(&owner->lock, flags);
1122
1123 for_each_sysmmu_list(dev, list) {
1124 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
1125
1126 spin_lock(&drvdata->lock);
1127
1128 if (!is_sysmmu_active(drvdata) ||
1129 !is_sysmmu_runtime_active(drvdata)) {
1130 spin_unlock(&drvdata->lock);
1131 continue;
1132 }
1133
1134 __master_clk_enable(drvdata);
1135
1136 if (sysmmu_block(drvdata->sfrbase)) {
1137 __exynos_sysmmu_set_prefbuf_by_region(
1138 drvdata, dev, pb_reg, num_reg);
1139 sysmmu_unblock(drvdata->sfrbase);
1140 }
1141
1142 __master_clk_disable(drvdata);
1143
1144 spin_unlock(&drvdata->lock);
1145 }
1146
1147 spin_unlock_irqrestore(&owner->lock, flags);
1148 }
1149
1150 int sysmmu_set_prefetch_buffer_by_plane(struct device *dev,
1151 unsigned int inplanes, unsigned int onplanes,
1152 unsigned int ipoption[], unsigned int opoption[])
1153 {
1154 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1155 struct sysmmu_list_data *list;
1156 unsigned long flags;
1157
1158 if (!dev->archdata.iommu) {
1159 dev_err(dev, "%s: No System MMU is configured\n", __func__);
1160 return -EINVAL;
1161 }
1162
1163 spin_lock_irqsave(&owner->lock, flags);
1164
1165 for_each_sysmmu_list(dev, list) {
1166 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
1167
1168 spin_lock(&drvdata->lock);
1169
1170 if (!is_sysmmu_active(drvdata) ||
1171 !is_sysmmu_runtime_active(drvdata)) {
1172 spin_unlock(&drvdata->lock);
1173 continue;
1174 }
1175
1176 __master_clk_enable(drvdata);
1177
1178 if (sysmmu_block(drvdata->sfrbase)) {
1179 __exynos_sysmmu_set_prefbuf_axi_id(drvdata, dev,
1180 inplanes, onplanes, ipoption, opoption);
1181 sysmmu_unblock(drvdata->sfrbase);
1182 }
1183
1184 __master_clk_disable(drvdata);
1185
1186 spin_unlock(&drvdata->lock);
1187 }
1188
1189 spin_unlock_irqrestore(&owner->lock, flags);
1190
1191 return 0;
1192 }
1193
1194 int sysmmu_set_prefetch_buffer_property(struct device *dev,
1195 unsigned int inplanes, unsigned int onplanes,
1196 unsigned int ipoption[], unsigned int opoption[])
1197 {
1198 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1199 struct sysmmu_list_data *list;
1200 unsigned long flags;
1201
1202 if (!dev->archdata.iommu) {
1203 dev_err(dev, "%s: No System MMU is configured\n", __func__);
1204 return -EINVAL;
1205 }
1206
1207 spin_lock_irqsave(&owner->lock, flags);
1208
1209 for_each_sysmmu_list(dev, list) {
1210 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
1211
1212 spin_lock(&drvdata->lock);
1213
1214 if (!is_sysmmu_active(drvdata) ||
1215 !is_sysmmu_runtime_active(drvdata)) {
1216 spin_unlock(&drvdata->lock);
1217 continue;
1218 }
1219
1220 __master_clk_enable(drvdata);
1221 __exynos_sysmmu_set_prefbuf_property(drvdata, dev,
1222 inplanes, onplanes, ipoption, opoption);
1223 __master_clk_disable(drvdata);
1224
1225 spin_unlock(&drvdata->lock);
1226 }
1227
1228 spin_unlock_irqrestore(&owner->lock, flags);
1229
1230 return 0;
1231 }
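
/*
 * Page-table-walk QoS: clear the PTW_QOS field of MMU_CFG and re-apply the
 * driver's qos value.  For System MMU versions before 5.0 the value is always
 * written; from 5.0 on, a non-negative qos also sets CFG_QOS_OVRRIDE and a
 * negative qos clears the override.
 */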
1232 static void __sysmmu_set_ptwqos(struct sysmmu_drvdata *data)
1233 {
1234 u32 cfg;
1235
1236 if (!sysmmu_block(data->sfrbase))
1237 return;
1238
1239 cfg = __raw_readl(data->sfrbase + REG_MMU_CFG);
1240 cfg &= ~CFG_QOS(15); /* clearing PTW_QOS field */
1241
1242 /*
1243 * PTW_QOS of System MMU 1.x ~ 3.x are all overridable
1244 * in __sysmmu_init_config()
1245 */
1246 if (__raw_sysmmu_version(data->sfrbase) < MAKE_MMU_VER(5, 0))
1247 cfg |= CFG_QOS(data->qos);
1248 else if (data->qos >= 0)
1249 cfg |= CFG_QOS_OVRRIDE | CFG_QOS(data->qos);
1250 else
1251 cfg &= ~CFG_QOS_OVRRIDE;
1252
1253 __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
1254 sysmmu_unblock(data->sfrbase);
1255 }
1256
1257 static void __sysmmu_set_qos(struct device *dev, unsigned int qosval)
1258 {
1259 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1260 struct sysmmu_list_data *list;
1261 unsigned long flags;
1262
1263 spin_lock_irqsave(&owner->lock, flags);
1264
1265 for_each_sysmmu_list(dev, list) {
1266 struct sysmmu_drvdata *data;
1267 data = dev_get_drvdata(list->sysmmu);
1268 spin_lock(&data->lock);
1269 data->qos = qosval;
1270 if (is_sysmmu_really_enabled(data)) {
1271 __master_clk_enable(data);
1272 __sysmmu_set_ptwqos(data);
1273 __master_clk_disable(data);
1274 }
1275 spin_unlock(&data->lock);
1276 }
1277
1278 spin_unlock_irqrestore(&owner->lock, flags);
1279 }
1280
1281 void sysmmu_set_qos(struct device *dev, unsigned int qos)
1282 {
1283 __sysmmu_set_qos(dev, (qos > 15) ? 15 : qos);
1284 }
1285
1286 void sysmmu_reset_qos(struct device *dev)
1287 {
1288 __sysmmu_set_qos(dev, DEFAULT_QOS_VALUE);
1289 }
1290
1291 void exynos_sysmmu_set_df(struct device *dev, dma_addr_t iova)
1292 {
1293 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1294 struct sysmmu_list_data *list;
1295 unsigned long flags;
1296 struct exynos_iovmm *vmm;
1297 int plane;
1298
1299 BUG_ON(!has_sysmmu(dev));
1300
1301 vmm = exynos_get_iovmm(dev);
1302 if (!vmm) {
1303 dev_err(dev, "%s: IOVMM not found\n", __func__);
1304 return;
1305 }
1306
1307 plane = find_iovmm_plane(vmm, iova);
1308 if (plane < 0) {
1309 dev_err(dev, "%s: IOVA %pa is out of IOVMM\n", __func__, &iova);
1310 return;
1311 }
1312
1313 spin_lock_irqsave(&owner->lock, flags);
1314
1315 for_each_sysmmu_list(dev, list) {
1316 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
1317
1318 spin_lock(&drvdata->lock);
1319
1320 if (is_sysmmu_active(drvdata) &&
1321 is_sysmmu_runtime_active(drvdata)) {
1322 __master_clk_enable(drvdata);
1323 if (drvdata->prop & SYSMMU_PROP_WINDOW_MASK) {
1324 unsigned long prop;
1325 prop = drvdata->prop & SYSMMU_PROP_WINDOW_MASK;
1326 prop >>= SYSMMU_PROP_WINDOW_SHIFT;
1327 if (prop & (1 << plane))
1328 __exynos_sysmmu_set_df(drvdata, iova);
1329 } else {
1330 __exynos_sysmmu_set_df(drvdata, iova);
1331 }
1332 __master_clk_disable(drvdata);
1333 }
1334 spin_unlock(&drvdata->lock);
1335 }
1336
1337 spin_unlock_irqrestore(&owner->lock, flags);
1338 }
1339
1340 void exynos_sysmmu_release_df(struct device *dev)
1341 {
1342 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1343 struct sysmmu_list_data *list;
1344 unsigned long flags;
1345
1346 BUG_ON(!has_sysmmu(dev));
1347
1348 spin_lock_irqsave(&owner->lock, flags);
1349
1350 for_each_sysmmu_list(dev, list) {
1351 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
1352
1353 spin_lock(&drvdata->lock);
1354 if (is_sysmmu_active(drvdata) &&
1355 is_sysmmu_runtime_active(drvdata)) {
1356 __master_clk_enable(drvdata);
1357 __exynos_sysmmu_release_df(drvdata);
1358 __master_clk_disable(drvdata);
1359 }
1360 spin_unlock(&drvdata->lock);
1361 }
1362
1363 spin_unlock_irqrestore(&owner->lock, flags);
1364 }
1365
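/*
 * Clock setup: each System MMU may expose up to three gating clocks, "aclk",
 * "pclk" and "master".  A missing clock (-ENOENT) is tolerated; any other
 * devm_clk_get() error, or a clk_prepare() failure, aborts the probe.
 */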
1366 static int __init __sysmmu_init_clock(struct device *sysmmu,
1367 struct sysmmu_drvdata *drvdata)
1368 {
1369 int i, ret;
1370
1371 /* Initialize SYSMMU clocks */
1372 for (i = 0; i < SYSMMU_CLK_NUM; i++)
1373 drvdata->clocks[i] = ERR_PTR(-ENOENT);
1374
1375 for (i = 0; i < SYSMMU_CLK_NUM; i++) {
1376 drvdata->clocks[i] =
1377 devm_clk_get(sysmmu, sysmmu_clock_names[i]);
1378 if (IS_ERR(drvdata->clocks[i]) &&
1379 !(drvdata->clocks[i] == ERR_PTR(-ENOENT))) {
1380 dev_err(sysmmu, "Failed to get sysmmu %s clock\n",
1381 sysmmu_clock_names[i]);
1382 return PTR_ERR(drvdata->clocks[i]);
1383 } else if (drvdata->clocks[i] == ERR_PTR(-ENOENT)) {
1384 continue;
1385 }
1386
1387 ret = clk_prepare(drvdata->clocks[i]);
1388 if (ret) {
1389 dev_err(sysmmu, "Failed to prepare sysmmu %s clock\n",
1390 sysmmu_clock_names[i]);
1391 while (i-- > 0) {
1392 if (!IS_ERR(drvdata->clocks[i]))
1393 clk_unprepare(drvdata->clocks[i]);
1394 }
1395 return ret;
1396 }
1397 }
1398
1399 return 0;
1400 }
1401
1402 int __sysmmu_init_pb_info(struct device *sysmmu, struct device *master,
1403 struct sysmmu_drvdata *data, struct device_node *pb_node,
1404 struct of_phandle_args *pb_args, int grp_num)
1405 {
1406 struct pb_info *pb;
1407 const char *s;
1408 int i, ret;
1409
1410 pb = devm_kzalloc(sysmmu, sizeof(*pb), GFP_KERNEL);
1411 if (!pb) {
1412 dev_err(sysmmu, "%s: failed to allocate pb_info[%d]\n",
1413 __func__, grp_num);
1414 return -ENOMEM;
1415 }
1416
1417 pb->master = master;
1418
1419 pb->grp_num = grp_num;
1420 for (i = 0; i < MAX_NUM_PBUF; i++) {
1421 pb->ar_axi_id[i] = -1;
1422 pb->aw_axi_id[i] = -1;
1423 }
1424
1425 INIT_LIST_HEAD(&pb->node);
1426
1427 for (i = 0; i < pb_args->args_count; i++) {
1428 if (is_axi_id(pb_args->args[i])) {
1429 if (is_ar_axi_id(pb_args->args[i])) {
1430 pb->ar_axi_id[pb->ar_id_num] = pb_args->args[i];
1431 pb->ar_id_num++;
1432 } else {
1433 pb->aw_axi_id[pb->aw_id_num] =
1434 pb_args->args[i] & AXIID_MASK;
1435 pb->aw_id_num++;
1436 }
1437 }
1438 }
1439
1440 ret = of_property_read_string(pb_node, "dir", &s);
1441 if (!ret) {
1442 int val;
1443 for (val = 1; val < ARRAY_SIZE(sysmmu_prop_opts); val++) {
1444 if (!strcasecmp(s, sysmmu_prop_opts[val])) {
1445 pb->prop &= ~SYSMMU_PROP_RW_MASK;
1446 pb->prop |= val;
1447 break;
1448 }
1449 }
1450 } else if (ret == -EINVAL) {
1451 pb->prop = SYSMMU_PROP_READWRITE;
1452 } else {
1453 dev_err(sysmmu, "%s: failed to get PB Direction of %s\n",
1454 __func__, pb_args->np->full_name);
1455 devm_kfree(sysmmu, pb);
1456 return ret;
1457 }
1458
1459 list_add_tail(&pb->node, &data->pb_grp_list);
1460
1461 dev_info(sysmmu, "device node[%d] : %s\n",
1462 pb->grp_num, pb_args->np->name);
1463 dev_info(sysmmu, "ar[%d] = {%d, %d, %d, %d, %d, %d}\n",
1464 pb->ar_id_num,
1465 pb->ar_axi_id[0], pb->ar_axi_id[1],
1466 pb->ar_axi_id[2], pb->ar_axi_id[3],
1467 pb->ar_axi_id[4], pb->ar_axi_id[5]);
1468 dev_info(sysmmu, "aw[%d] = {%d, %d, %d, %d, %d, %d}\n",
1469 pb->aw_id_num,
1470 pb->aw_axi_id[0], pb->aw_axi_id[1],
1471 pb->aw_axi_id[2], pb->aw_axi_id[3],
1472 pb->aw_axi_id[4], pb->aw_axi_id[5]);
1473 return 0;
1474 }
1475
1476 int __sysmmu_update_owner(struct device *master, struct device *sysmmu)
1477 {
1478 struct exynos_iommu_owner *owner;
1479 struct sysmmu_list_data *list_data;
1480
1481 owner = master->archdata.iommu;
1482 if (!owner) {
1483 owner = kzalloc(sizeof(*owner), GFP_KERNEL);
1484 if (!owner) {
1485 dev_err(master, "%s: Failed to allocate owner structure\n",
1486 __func__);
1487 return -ENOMEM;
1488 }
1489
1490 INIT_LIST_HEAD(&owner->mmu_list);
1491 INIT_LIST_HEAD(&owner->client);
1492 owner->dev = master;
1493 spin_lock_init(&owner->lock);
1494
1495 master->archdata.iommu = owner;
1496 if (!sysmmu_owner_list) {
1497 sysmmu_owner_list = owner;
1498 } else {
1499 owner->next = sysmmu_owner_list->next;
1500 sysmmu_owner_list->next = owner;
1501 }
1502 }
1503
1504 list_for_each_entry(list_data, &owner->mmu_list, node)
1505 if (list_data->sysmmu == sysmmu)
1506 return 0;
1507
1508 list_data = devm_kzalloc(sysmmu, sizeof(*list_data), GFP_KERNEL);
1509 if (!list_data) {
1510 dev_err(sysmmu, "%s: Failed to allocate sysmmu_list_data\n",
1511 __func__);
1512 return -ENOMEM;
1513 }
1514
1515 INIT_LIST_HEAD(&list_data->node);
1516 list_data->sysmmu = sysmmu;
1517
1518 /*
1519 * System MMUs are attached in the order of the presence
1520 * in device tree
1521 */
1522 list_add_tail(&list_data->node, &owner->mmu_list);
1523 dev_info(master, "--> %s\n", dev_name(sysmmu));
1524
1525 return 0;
1526 }
1527
1528 static struct platform_device * __init __sysmmu_init_owner(struct device *sysmmu,
1529 struct sysmmu_drvdata *data,
1530 struct of_phandle_args *pb_args)
1531 {
1532 struct device_node *master_node = pb_args->np;
1533 struct platform_device *master;
1534 int ret;
1535
1536 master = of_find_device_by_node(master_node);
1537 if (!master) {
1538 pr_err("%s: failed to get master device in '%s'\n",
1539 __func__, master_node->full_name);
1540 return ERR_PTR(-EINVAL);
1541 }
1542
1543 ret = __sysmmu_update_owner(&master->dev, sysmmu);
1544 if (ret) {
1545 pr_err("%s: failed to update iommu owner '%s'\n",
1546 __func__, dev_name(&master->dev));
1547 of_node_put(master_node);
1548 return ERR_PTR(-EINVAL);
1549 }
1550
1551 of_node_put(master_node);
1552
1553 return master;
1554 }
1555
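/*
 * Device-tree parsing of the "pb-info" child node: every sub-node carries a
 * "master_axi_id_list" phandle list (with "#pb-id-cells") that names the
 * master device and the AXI IDs assigned to that prefetch buffer group.
 */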
1556 static int __init __sysmmu_init_master_info(struct device *sysmmu,
1557 struct sysmmu_drvdata *data)
1558 {
1559 struct device_node *node;
1560 struct device_node *pb_info;
1561 int grp_num = 0;
1562 int ret = 0;
1563
1564 pb_info = of_get_child_by_name(sysmmu->of_node, "pb-info");
1565 if (!pb_info) {
1566 pr_info("%s: 'pb-info' node not found from '%s' node\n",
1567 __func__, dev_name(sysmmu));
1568 return 0;
1569 }
1570
1571 INIT_LIST_HEAD(&data->pb_grp_list);
1572
1573 for_each_child_of_node(pb_info, node) {
1574 struct of_phandle_args pb_args;
1575 struct platform_device *master;
1576 int i, master_cnt = 0;
1577
1578 master_cnt = of_count_phandle_with_args(node,
1579 "master_axi_id_list",
1580 "#pb-id-cells");
1581
1582 for (i = 0; i < master_cnt; i++) {
1583 memset(&pb_args, 0x0, sizeof(pb_args));
1584 ret = of_parse_phandle_with_args(node,
1585 "master_axi_id_list",
1586 "#pb-id-cells", i, &pb_args);
1587 if (ret) {
1588 of_node_put(node);
1589 pr_err("%s: failed to get PB info of %s\n",
1590 __func__, dev_name(data->sysmmu));
1591 return ret;
1592 }
1593
1594 master = __sysmmu_init_owner(sysmmu, data, &pb_args);
1595 if (IS_ERR(master)) {
1596 of_node_put(node);
1597 of_node_put(pb_args.np);
1598 pr_err("%s: failed to initialize sysmmu(%s) "
1599 "owner info\n", __func__,
1600 dev_name(data->sysmmu));
1601 return PTR_ERR(master);
1602 }
1603
1604 ret = __sysmmu_init_pb_info(sysmmu, &master->dev, data,
1605 node, &pb_args, grp_num);
1606 if (ret) {
1607 of_node_put(node);
1608 of_node_put(pb_args.np);
1609 pr_err("%s: failed to update pb axi id '%s'\n",
1610 __func__, dev_name(sysmmu));
1611 break;
1612 }
1613
1614 of_node_put(pb_args.np);
1615 }
1616
1617 if (ret) {
1618 struct pb_info *pb;
1619 while (!list_empty(&data->pb_grp_list)) {
1620 pb = list_entry(data->pb_grp_list.next,
1621 struct pb_info, node);
1622 list_del(&pb->node);
1623 kfree(pb);
1624 }
1625 }
1626
1627 of_node_put(node);
1628 grp_num++;
1629 }
1630
1631 return ret;
1632 }
1633
1634 static int __init __sysmmu_init_prop(struct device *sysmmu,
1635 struct sysmmu_drvdata *drvdata)
1636 {
1637 struct device_node *prop_node;
1638 const char *s;
1639 unsigned int qos = DEFAULT_QOS_VALUE;
1640 int ret;
1641
1642 ret = of_property_read_u32_index(sysmmu->of_node, "qos", 0, &qos);
1643
1644 if ((ret == 0) && (qos > 15)) {
1645 dev_err(sysmmu, "%s: Invalid QoS value %d specified\n",
1646 __func__, qos);
1647 qos = DEFAULT_QOS_VALUE;
1648 }
1649
1650 drvdata->qos = (short)qos;
1651
1652 /*
1653 * The 'prop-map' child node of System MMU device nodes in FDT is
1654 * deprecated.  A separate child node is not required for boolean
1655 * properties such as 'block-stop' and 'tlbinv-nonblock'.
1656 * 'tlbinv-nonblock' is a H/W workaround that accelerates master H/W
1657 * performance for 5.x and earlier versions of System MMU.
1658 * 'sysmmu,tlbinv-nonblock' is introduced instead for those earlier
1659 * versions.
1660 * Instead of 'block-stop' in the 'prop-map' child node,
1661 * 'sysmmu,block-when-stop' without a value is introduced to simplify
1662 * the FDT node definitions.
1663 * For compatibility with the existing FDT files, the 'prop-map'
1664 * child node parsing is still kept.
1665 */
1666 prop_node = of_get_child_by_name(sysmmu->of_node, "prop-map");
1667 if (prop_node) {
1668 if (!of_property_read_string(prop_node, "tlbinv-nonblock", &s))
1669 if (strnicmp(s, "yes", 3) == 0)
1670 drvdata->prop |= SYSMMU_PROP_NONBLOCK_TLBINV;
1671
1672 if (!of_property_read_string(prop_node, "block-stop", &s))
1673 if (strnicmp(s, "yes", 3) == 0)
1674 drvdata->prop |= SYSMMU_PROP_STOP_BLOCK;
1675
1676 of_node_put(prop_node);
1677 }
1678
1679 if (of_find_property(sysmmu->of_node, "sysmmu,block-when-stop", NULL))
1680 drvdata->prop |= SYSMMU_PROP_STOP_BLOCK;
1681
1682 if (of_find_property(sysmmu->of_node, "sysmmu,tlbinv-nonblock", NULL))
1683 drvdata->prop |= SYSMMU_PROP_NONBLOCK_TLBINV;
1684
1685 if (of_find_property(sysmmu->of_node, "sysmmu,acg_disable", NULL))
1686 drvdata->prop |= SYSMMU_PROP_DISABLE_ACG;
1687
1688 return 0;
1689 }
1690
1691 static int __init __sysmmu_setup(struct device *sysmmu,
1692 struct sysmmu_drvdata *drvdata)
1693 {
1694 int ret;
1695
1696 ret = __sysmmu_init_prop(sysmmu, drvdata);
1697 if (ret) {
1698 dev_err(sysmmu, "Failed to initialize sysmmu properties\n");
1699 return ret;
1700 }
1701
1702 ret = __sysmmu_init_clock(sysmmu, drvdata);
1703 if (ret) {
1704 dev_err(sysmmu, "Failed to initialize gating clocks\n");
1705 return ret;
1706 }
1707
1708 ret = __sysmmu_init_master_info(sysmmu, drvdata);
1709 if (ret) {
1710 int i;
1711 for (i = 0; i < SYSMMU_CLK_NUM; i++) {
1712 if (!IS_ERR(drvdata->clocks[i]))
1713 clk_unprepare(drvdata->clocks[i]);
1714 }
1715 dev_err(sysmmu, "Failed to initialize master device.\n");
1716 }
1717
1718 return ret;
1719 }
1720
1721 static int __init exynos_sysmmu_probe(struct platform_device *pdev)
1722 {
1723 int ret;
1724 struct device *dev = &pdev->dev;
1725 struct sysmmu_drvdata *data;
1726 struct resource *res;
1727
1728 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
1729 if (!data) {
1730 dev_err(dev, "Not enough memory\n");
1731 return -ENOMEM;
1732 }
1733
1734 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1735 if (!res) {
1736 dev_err(dev, "Unable to find IOMEM region\n");
1737 return -ENOENT;
1738 }
1739
1740 data->sfrbase = devm_request_and_ioremap(dev, res);
1741 if (!data->sfrbase) {
1742 dev_err(dev, "Unable to map IOMEM @ PA:%pa\n", &res->start);
1743 return -EBUSY;
1744 }
1745
1746 ret = platform_get_irq(pdev, 0);
1747 if (ret <= 0) {
1748 dev_err(dev, "Unable to find IRQ resource\n");
1749 return ret ? ret : -EINVAL;
1750 }
1751
1752 ret = devm_request_irq(dev, ret, exynos_sysmmu_irq, 0,
1753 dev_name(dev), data);
1754 if (ret) {
1755 dev_err(dev, "Unable to register interrupt handler\n");
1756 return ret;
1757 }
1758
1759 pm_runtime_enable(dev);
1760
1761 ret = exynos_iommu_init_event_log(SYSMMU_DRVDATA_TO_LOG(data),
1762 SYSMMU_LOG_LEN);
1763 if (!ret)
1764 sysmmu_add_log_to_debugfs(exynos_sysmmu_debugfs_root,
1765 SYSMMU_DRVDATA_TO_LOG(data), dev_name(dev));
1766 else
1767 return ret;
1768
1769 ret = __sysmmu_setup(dev, data);
1770 if (!ret) {
1771 if (!pm_runtime_enabled(dev))
1772 get_sysmmu_runtime_active(data);
1773 data->sysmmu = dev;
1774 ATOMIC_INIT_NOTIFIER_HEAD(&data->fault_notifiers);
1775 spin_lock_init(&data->lock);
1776 if (!sysmmu_drvdata_list) {
1777 sysmmu_drvdata_list = data;
1778 } else {
1779 data->next = sysmmu_drvdata_list->next;
1780 sysmmu_drvdata_list->next = data;
1781 }
1782
1783 platform_set_drvdata(pdev, data);
1784
1785 dev_info(dev, "[OK]\n");
1786 }
1787
1788 return ret;
1789 }
1790
1791 #ifdef CONFIG_OF
1792 static struct of_device_id sysmmu_of_match[] __initconst = {
1793 { .compatible = "samsung,exynos7420-sysmmu", },
1794 { },
1795 };
1796 #endif
1797
1798 static struct platform_driver exynos_sysmmu_driver __refdata = {
1799 .probe = exynos_sysmmu_probe,
1800 .driver = {
1801 .owner = THIS_MODULE,
1802 .name = MODULE_NAME,
1803 .of_match_table = of_match_ptr(sysmmu_of_match),
1804 }
1805 };
1806
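/*
 * Domain setup: the first-level page table and the per-section Lv2 entry
 * counters are each allocated as order-2 page blocks (16 KiB with 4 KiB
 * pages); the page table is then flushed so the page-table walker sees a
 * fully zeroed table.
 */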
1807 static int exynos_iommu_domain_init(struct iommu_domain *domain)
1808 {
1809 struct exynos_iommu_domain *priv;
1810
1811 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1812 if (!priv)
1813 return -ENOMEM;
1814
1815 priv->pgtable = (sysmmu_pte_t *)__get_free_pages(
1816 GFP_KERNEL | __GFP_ZERO, 2);
1817 if (!priv->pgtable)
1818 goto err_pgtable;
1819
1820 priv->lv2entcnt = (atomic_t *)__get_free_pages(
1821 GFP_KERNEL | __GFP_ZERO, 2);
1822 if (!priv->lv2entcnt)
1823 goto err_counter;
1824
1825 if (exynos_iommu_init_event_log(IOMMU_PRIV_TO_LOG(priv), IOMMU_LOG_LEN))
1826 goto err_init_event_log;
1827
1828 pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
1829
1830 spin_lock_init(&priv->lock);
1831 spin_lock_init(&priv->pgtablelock);
1832 INIT_LIST_HEAD(&priv->clients);
1833
1834 domain->priv = priv;
1835 domain->handler = recover_fault_handler;
1836 return 0;
1837
1838 err_init_event_log:
1839 free_pages((unsigned long)priv->lv2entcnt, 2);
1840 err_counter:
1841 free_pages((unsigned long)priv->pgtable, 2);
1842 err_pgtable:
1843 kfree(priv);
1844 return -ENOMEM;
1845 }
1846
1847 static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
1848 {
1849 struct exynos_iommu_domain *priv = domain->priv;
1850 struct exynos_iommu_owner *owner;
1851 unsigned long flags;
1852 int i;
1853
1854 WARN_ON(!list_empty(&priv->clients));
1855
1856 spin_lock_irqsave(&priv->lock, flags);
1857
1858 list_for_each_entry(owner, &priv->clients, client)
1859 while (!exynos_sysmmu_disable(owner->dev))
1860 ; /* until System MMU is actually disabled */
1861
1862 while (!list_empty(&priv->clients))
1863 list_del_init(priv->clients.next);
1864
1865 spin_unlock_irqrestore(&priv->lock, flags);
1866
1867 for (i = 0; i < NUM_LV1ENTRIES; i++)
1868 if (lv1ent_page(priv->pgtable + i))
1869 kmem_cache_free(lv2table_kmem_cache,
1870 __va(lv2table_base(priv->pgtable + i)));
1871
1872 free_pages((unsigned long)priv->pgtable, 2);
1873 free_pages((unsigned long)priv->lv2entcnt, 2);
1874 kfree(domain->priv);
1875 domain->priv = NULL;
1876 }
1877
1878 static int exynos_iommu_attach_device(struct iommu_domain *domain,
1879 struct device *dev)
1880 {
1881 struct exynos_iommu_domain *priv = domain->priv;
1882 phys_addr_t pgtable = virt_to_phys(priv->pgtable);
1883 unsigned long flags;
1884 int ret;
1885
1886 spin_lock_irqsave(&priv->lock, flags);
1887
1888 ret = __exynos_sysmmu_enable(dev, __pa(priv->pgtable), domain);
1889
1890 spin_unlock_irqrestore(&priv->lock, flags);
1891
1892 if (ret < 0) {
1893 dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
1894 __func__, &pgtable);
1895 } else {
1896 SYSMMU_EVENT_LOG_IOMMU_ATTACH(IOMMU_PRIV_TO_LOG(priv), dev);
1897 TRACE_LOG_DEV(dev,
1898 "%s: Attached new IOMMU with pgtable %pa %s\n",
1899 __func__, &pgtable, (ret == 0) ? "" : ", again");
1900 ret = 0;
1901 }
1902
1903 return ret;
1904 }
1905
1906 static void exynos_iommu_detach_device(struct iommu_domain *domain,
1907 struct device *dev)
1908 {
1909 struct exynos_iommu_owner *owner;
1910 struct exynos_iommu_domain *priv = domain->priv;
1911 unsigned long flags;
1912
1913 spin_lock_irqsave(&priv->lock, flags);
1914
1915 list_for_each_entry(owner, &priv->clients, client) {
1916 if (owner == dev->archdata.iommu) {
1917 exynos_sysmmu_disable(owner->dev);
1918 break;
1919 }
1920 }
1921
1922 spin_unlock_irqrestore(&priv->lock, flags);
1923
1924 if (owner == dev->archdata.iommu) {
1925 SYSMMU_EVENT_LOG_IOMMU_DETACH(IOMMU_PRIV_TO_LOG(priv), dev);
1926 TRACE_LOG_DEV(dev, "%s: Detached IOMMU with pgtable %#lx\n",
1927 __func__, __pa(priv->pgtable));
1928 } else {
1929 dev_err(dev, "%s: No IOMMU is attached\n", __func__);
1930 }
1931 }
1932
1933 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
1934 sysmmu_pte_t *sent, unsigned long iova, atomic_t *pgcounter)
1935 {
1936 if (lv1ent_fault(sent)) {
1937 unsigned long flags;
1938 spin_lock_irqsave(&priv->pgtablelock, flags);
1939 if (lv1ent_fault(sent)) {
1940 sysmmu_pte_t *pent;
1941
1942 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
1943 if (!pent) {
1944 spin_unlock_irqrestore(&priv->pgtablelock, flags);
1945 return ERR_PTR(-ENOMEM);
1946 }
1947 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
1948
1949 *sent = mk_lv1ent_page(__pa(pent));
1950 kmemleak_ignore(pent);
1951 atomic_set(pgcounter, NUM_LV2ENTRIES);
1952 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
1953 pgtable_flush(sent, sent + 1);
1954 SYSMMU_EVENT_LOG_IOMMU_ALLOCSLPD(IOMMU_PRIV_TO_LOG(priv),
1955 iova & SECT_MASK);
1956 }
1957 spin_unlock_irqrestore(&priv->pgtablelock, flags);
1958 } else if (!lv1ent_page(sent)) {
1959 BUG();
1960 return ERR_PTR(-EADDRINUSE);
1961 }
1962
1963 return page_entry(sent, iova);
1964 }
1965
1966 static int lv1ent_check_page(struct exynos_iommu_domain *priv,
1967 sysmmu_pte_t *sent, atomic_t *pgcnt)
1968 {
1969 if (lv1ent_page(sent)) {
1970 if (WARN_ON(atomic_read(pgcnt) != NUM_LV2ENTRIES))
1971 return -EADDRINUSE;
1972 }
1973
1974 return 0;
1975 }
1976
1977 static void clear_lv1_page_table(sysmmu_pte_t *ent, int n)
1978 {
1979 if (n > 0)
1980 memset(ent, 0, sizeof(*ent) * n);
1981 }
1982
1983 static void clear_lv2_page_table(sysmmu_pte_t *ent, int n)
1984 {
1985 if (n > 0)
1986 memset(ent, 0, sizeof(*ent) * n);
1987 }
1988
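/*
 * Install a section-sized mapping in the LV1 page table: one entry for
 * SECT_SIZE, SECT_PER_DSECT entries for DSECT_SIZE, otherwise
 * SECT_PER_SPSECT entries for a super-section. Partially written entries are
 * rolled back if any slot turns out to be in use.
 */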
1989 static int lv1set_section(struct exynos_iommu_domain *priv,
1990 sysmmu_pte_t *sent, phys_addr_t paddr,
1991 size_t size, atomic_t *pgcnt)
1992 {
1993 int ret;
1994
1995 if (WARN_ON(!lv1ent_fault(sent) && !lv1ent_page(sent)))
1996 return -EADDRINUSE;
1997
1998 if (size == SECT_SIZE) {
1999 ret = lv1ent_check_page(priv, sent, pgcnt);
2000 if (ret)
2001 return ret;
2002 *sent = mk_lv1ent_sect(paddr);
2003 pgtable_flush(sent, sent + 1);
2004 } else if (size == DSECT_SIZE) {
2005 int i;
2006 for (i = 0; i < SECT_PER_DSECT; i++, sent++, pgcnt++) {
2007 ret = lv1ent_check_page(priv, sent, pgcnt);
2008 if (ret) {
2009 clear_lv1_page_table(sent - i, i);
2010 return ret;
2011 }
2012 *sent = mk_lv1ent_dsect(paddr);
2013 }
2014 pgtable_flush(sent - SECT_PER_DSECT, sent);
2015 } else {
2016 int i;
2017 for (i = 0; i < SECT_PER_SPSECT; i++, sent++, pgcnt++) {
2018 ret = lv1ent_check_page(priv, sent, pgcnt);
2019 if (ret) {
2020 clear_lv1_page_table(sent - i, i);
2021 return ret;
2022 }
2023 *sent = mk_lv1ent_spsect(paddr);
2024 }
2025 pgtable_flush(sent - SECT_PER_SPSECT, sent);
2026 }
2027
2028 return 0;
2029 }
2030
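/*
 * Install a small-page or large-page mapping in an LV2 page table and adjust
 * the domain's free-entry counter for that table accordingly.
 */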
2031 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr,
2032 size_t size, atomic_t *pgcnt)
2033 {
2034 if (size == SPAGE_SIZE) {
2035 if (WARN_ON(!lv2ent_fault(pent)))
2036 return -EADDRINUSE;
2037
2038 *pent = mk_lv2ent_spage(paddr);
2039 pgtable_flush(pent, pent + 1);
2040 atomic_dec(pgcnt);
2041 } else { /* size == LPAGE_SIZE */
2042 int i;
2043 for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
2044 if (WARN_ON(!lv2ent_fault(pent))) {
2045 clear_lv2_page_table(pent - i, i);
2046 return -EADDRINUSE;
2047 }
2048
2049 *pent = mk_lv2ent_lpage(paddr);
2050 }
2051 pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
2052 atomic_sub(SPAGES_PER_LPAGE, pgcnt);
2053 }
2054
2055 return 0;
2056 }
2057
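/*
 * iommu_ops->map callback: sizes of SECT_SIZE and above are mapped directly
 * in the LV1 table, smaller sizes go through an LV2 table that is allocated
 * on demand.
 */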
2058 static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
2059 phys_addr_t paddr, size_t size, int prot)
2060 {
2061 struct exynos_iommu_domain *priv = domain->priv;
2062 sysmmu_pte_t *entry;
2063 int ret = -ENOMEM;
2064
2065 BUG_ON(priv->pgtable == NULL);
2066
2067 entry = section_entry(priv->pgtable, iova);
2068
2069 if (size >= SECT_SIZE) {
2070 ret = lv1set_section(priv, entry, paddr, size,
2071 &priv->lv2entcnt[lv1ent_offset(iova)]);
2072
2073 SYSMMU_EVENT_LOG_IOMMU_MAP(IOMMU_PRIV_TO_LOG(priv),
2074 iova, iova + size, paddr / SPAGE_SIZE);
2075 } else {
2076 sysmmu_pte_t *pent;
2077 pent = alloc_lv2entry(priv, entry, iova,
2078 &priv->lv2entcnt[lv1ent_offset(iova)]);
2079 if (IS_ERR(pent)) {
2080 ret = PTR_ERR(pent);
2081 } else {
2082 ret = lv2set_page(pent, paddr, size,
2083 &priv->lv2entcnt[lv1ent_offset(iova)]);
2084
2085 SYSMMU_EVENT_LOG_IOMMU_MAP(IOMMU_PRIV_TO_LOG(priv),
2086 iova, iova + size, paddr / SPAGE_SIZE);
2087 }
2088 }
2089
2090 if (ret)
2091 pr_err("%s: Failed(%d) to map %#zx bytes @ %#lx\n",
2092 __func__, ret, size, iova);
2093
2095 return ret;
2096 }
2097
2098 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
2099 unsigned long iova)
2100 {
2101 struct exynos_iommu_owner *owner;
2102 unsigned long flags;
2103
2104 spin_lock_irqsave(&priv->lock, flags);
2105
2106 list_for_each_entry(owner, &priv->clients, client)
2107 sysmmu_tlb_invalidate_entry(owner->dev, iova, false);
2108
2109 spin_unlock_irqrestore(&priv->lock, flags);
2110 }
2111
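/*
 * iommu_ops->unmap callback: clears whatever mapping type (super-section,
 * double-section, section, large page or small page) is found at @iova,
 * frees an LV2 table once all of its entries are unmapped again, and
 * invalidates the TLBs of every client device.
 */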
2112 static size_t exynos_iommu_unmap(struct iommu_domain *domain,
2113 unsigned long iova, size_t size)
2114 {
2115 struct exynos_iommu_domain *priv = domain->priv;
2116 size_t err_pgsize;
2117 sysmmu_pte_t *sent, *pent;
2118 atomic_t *lv2entcnt = &priv->lv2entcnt[lv1ent_offset(iova)];
2119
2120 BUG_ON(priv->pgtable == NULL);
2121
2122 sent = section_entry(priv->pgtable, iova);
2123
2124 if (lv1ent_spsection(sent)) {
2125 if (WARN_ON(size < SPSECT_SIZE)) {
2126 err_pgsize = SPSECT_SIZE;
2127 goto err;
2128 }
2129
2130 clear_lv1_page_table(sent, SECT_PER_SPSECT);
2131
2132 pgtable_flush(sent, sent + SECT_PER_SPSECT);
2133 size = SPSECT_SIZE;
2134 goto done;
2135 }
2136
2137 if (lv1ent_dsection(sent)) {
2138 if (WARN_ON(size < DSECT_SIZE)) {
2139 err_pgsize = DSECT_SIZE;
2140 goto err;
2141 }
2142
2143 sent[0] = 0;
2144 sent[1] = 0;
2145 pgtable_flush(sent, sent + 2);
2146 size = DSECT_SIZE;
2147 goto done;
2148 }
2149
2150 if (lv1ent_section(sent)) {
2151 if (WARN_ON(size < SECT_SIZE)) {
2152 err_pgsize = SECT_SIZE;
2153 goto err;
2154 }
2155
2156 *sent = 0;
2157 pgtable_flush(sent, sent + 1);
2158 size = SECT_SIZE;
2159 goto done;
2160 }
2161
2162 if (unlikely(lv1ent_fault(sent))) {
2163 if (size > SECT_SIZE)
2164 size = SECT_SIZE;
2165 goto done;
2166 }
2167
2168 /* lv1ent_page(sent) == true here */
2169
2170 pent = page_entry(sent, iova);
2171
2172 if (unlikely(lv2ent_fault(pent))) {
2173 size = SPAGE_SIZE;
2174 goto done;
2175 }
2176
2177 if (lv2ent_small(pent)) {
2178 *pent = 0;
2179 size = SPAGE_SIZE;
2180 pgtable_flush(pent, pent + 1);
2181 atomic_inc(lv2entcnt);
2182 goto unmap_flpd;
2183 }
2184
2185 /* lv2ent_large(pent) == true here */
2186 if (WARN_ON(size < LPAGE_SIZE)) {
2187 err_pgsize = LPAGE_SIZE;
2188 goto err;
2189 }
2190
2191 clear_lv2_page_table(pent, SPAGES_PER_LPAGE);
2192 pgtable_flush(pent, pent + SPAGES_PER_LPAGE);
2193 size = LPAGE_SIZE;
2194 atomic_add(SPAGES_PER_LPAGE, lv2entcnt);
2195
2196 unmap_flpd:
2197 if (atomic_read(lv2entcnt) == NUM_LV2ENTRIES) {
2198 unsigned long flags;
2199 spin_lock_irqsave(&priv->pgtablelock, flags);
2200 if (atomic_read(lv2entcnt) == NUM_LV2ENTRIES) {
2201 kmem_cache_free(lv2table_kmem_cache,
2202 page_entry(sent, 0));
2203 atomic_set(lv2entcnt, 0);
2204 *sent = 0;
2205
2206 SYSMMU_EVENT_LOG_IOMMU_FREESLPD(IOMMU_PRIV_TO_LOG(priv), iova_from_sent(priv->pgtable, sent));
2207 }
2208 spin_unlock_irqrestore(&priv->pgtablelock, flags);
2209 }
2210
2211 done:
2212 SYSMMU_EVENT_LOG_IOMMU_UNMAP(IOMMU_PRIV_TO_LOG(priv),
2213 iova, iova + size);
2214
2215 exynos_iommu_tlb_invalidate_entry(priv, iova);
2216
2217 /* TLB entries covering the unmapped range were invalidated above */
2218 return size;
2219
2220 err:
2221 pr_err("%s: Failed: size(%#zx) @ %#lx is smaller than page size %#zx\n",
2222 __func__, size, iova, err_pgsize);
2223
2224 return 0;
2225 }
2226
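/*
 * iommu_ops->iova_to_phys callback: walk the two-level page table and return
 * the physical address mapped at @iova, or 0 if nothing is mapped there.
 */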
2227 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
2228 dma_addr_t iova)
2229 {
2230 struct exynos_iommu_domain *priv = domain->priv;
2231 sysmmu_pte_t *entry;
2232 phys_addr_t phys = 0;
2233
2234 entry = section_entry(priv->pgtable, iova);
2235
2236 if (lv1ent_spsection(entry)) {
2237 phys = spsection_phys(entry) + spsection_offs(iova);
2238 } else if (lv1ent_dsection(entry)) {
2239 phys = dsection_phys(entry) + dsection_offs(iova);
2240 } else if (lv1ent_section(entry)) {
2241 phys = section_phys(entry) + section_offs(iova);
2242 } else if (lv1ent_page(entry)) {
2243 entry = page_entry(entry, iova);
2244
2245 if (lv2ent_large(entry))
2246 phys = lpage_phys(entry) + lpage_offs(iova);
2247 else if (lv2ent_small(entry))
2248 phys = spage_phys(entry) + spage_offs(iova);
2249 }
2250
2251 return phys;
2252 }
2253
2254 static struct iommu_ops exynos_iommu_ops = {
2255 .domain_init = &exynos_iommu_domain_init,
2256 .domain_destroy = &exynos_iommu_domain_destroy,
2257 .attach_dev = &exynos_iommu_attach_device,
2258 .detach_dev = &exynos_iommu_detach_device,
2259 .map = &exynos_iommu_map,
2260 .unmap = &exynos_iommu_unmap,
2261 .iova_to_phys = &exynos_iommu_iova_to_phys,
2262 .pgsize_bitmap = PGSIZE_BITMAP,
2263 };
2264
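/*
 * Undo a mapping created by exynos_sysmmu_map_user_pages(): walk the LV2
 * tables covering [iova, iova + size), drop the page references taken at map
 * time (unless the VMA is VM_PFNMAP) and clear the entries.
 */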
2265 static int __sysmmu_unmap_user_pages(struct device *dev,
2266 struct mm_struct *mm,
2267 unsigned long vaddr,
2268 exynos_iova_t iova,
2269 size_t size)
2270 {
2271 struct exynos_iommu_owner *owner = dev->archdata.iommu;
2272 struct exynos_iovmm *vmm = owner->vmm_data;
2273 struct iommu_domain *domain = vmm->domain;
2274 struct exynos_iommu_domain *priv = domain->priv;
2275 struct vm_area_struct *vma;
2276 unsigned long start = vaddr & PAGE_MASK;
2277 unsigned long end = PAGE_ALIGN(vaddr + size);
2278 bool is_pfnmap;
2279 sysmmu_pte_t *sent, *pent;
2280 int ret = 0;
2281
2282 down_read(&mm->mmap_sem);
2283
2284 BUG_ON((vaddr + size) < vaddr);
2285 /*
2286 * Assumes that the VMA is safe.
2287 * The caller must check the range of address space before calling this.
2288 */
2289 vma = find_vma(mm, vaddr);
2290 if (!vma) {
2291 pr_err("%s: vma is null\n", __func__);
2292 ret = -EINVAL;
2293 goto out_unmap;
2294 }
2295
2296 if (vma->vm_end < (vaddr + size)) {
2297 pr_err("%s: vma overflow: %#lx--%#lx, vaddr: %#lx, size: %zd\n",
2298 __func__, vma->vm_start, vma->vm_end, vaddr, size);
2299 ret = -EINVAL;
2300 goto out_unmap;
2301 }
2302
2303 is_pfnmap = vma->vm_flags & VM_PFNMAP;
2304
2305 TRACE_LOG_DEV(dev, "%s: unmap starts @ %#zx@%#lx\n",
2306 __func__, size, start);
2307
2308 do {
2309 sysmmu_pte_t *pent_first;
2310
2311 sent = section_entry(priv->pgtable, iova);
2312 if (lv1ent_fault(sent)) {
2313 ret = -EFAULT;
2314 goto out_unmap;
2315 }
2316
2317 pent = page_entry(sent, iova);
2318 if (lv2ent_fault(pent)) {
2319 ret = -EFAULT;
2320 goto out_unmap;
2321 }
2322
2323 pent_first = pent;
2324
2325 do {
2326 if (!lv2ent_fault(pent) && !is_pfnmap)
2327 put_page(phys_to_page(spage_phys(pent)));
2328
2329 *pent = 0;
2330 if (lv2ent_offset(iova) == NUM_LV2ENTRIES - 1) {
2331 pgtable_flush(pent_first, pent + 1);
2332 iova += PAGE_SIZE;
2333 sent = section_entry(priv->pgtable, iova);
2334 if (lv1ent_fault(sent)) {
2335 ret = -EFAULT;
2336 goto out_unmap;
2337 }
2338
2339 pent = page_entry(sent, iova);
2340 if (lv2ent_fault(pent)) {
2341 ret = -EFAULT;
2342 goto out_unmap;
2343 }
2344
2345 pent_first = pent;
2346 } else {
2347 iova += PAGE_SIZE;
2348 pent++;
2349 }
2350 } while (start += PAGE_SIZE, start != end);
2351
2352 if (pent_first != pent)
2353 pgtable_flush(pent_first, pent);
2354 } while (start != end);
2355
2356 TRACE_LOG_DEV(dev, "%s: unmap done @ %#lx\n", __func__, start);
2357
2358 out_unmap:
2359 up_read(&mm->mmap_sem);
2360
2361 if (ret) {
2362 pr_debug("%s: Ignoring unmapping for %#lx ~ %#lx\n",
2363 __func__, start, end);
2364 }
2365
2366 return ret;
2367 }
2368
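/*
 * Variant of alloc_lv2entry() used on the user-page mapping path: it takes
 * neither pgtablelock nor initializes the free-entry counter, so the caller
 * is expected to serialize page table updates for the range.
 */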
2369 static sysmmu_pte_t *alloc_lv2entry_fast(struct exynos_iommu_domain *priv,
2370 sysmmu_pte_t *sent, unsigned long iova)
2371 {
2372 if (lv1ent_fault(sent)) {
2373 sysmmu_pte_t *pent;
2374
2375 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
2376 if (!pent)
2377 return ERR_PTR(-ENOMEM);
2378 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
2379
2380 *sent = mk_lv1ent_page(virt_to_phys(pent));
2381 kmemleak_ignore(pent);
2382 pgtable_flush(sent, sent + 1);
2383 } else if (WARN_ON(!lv1ent_page(sent))) {
2384 return ERR_PTR(-EADDRINUSE);
2385 }
2386
2387 return page_entry(sent, iova);
2388 }
2389
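/*
 * Map a range of a user process' address space into the device's IOVA space
 * one small page at a time. Not-present (or read-only, when @write is set)
 * PTEs are faulted in via handle_pte_fault(), a reference is taken on each
 * page unless the VMA is VM_PFNMAP, and any partial mapping is unwound on
 * failure.
 */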
2390 int exynos_sysmmu_map_user_pages(struct device *dev,
2391 struct mm_struct *mm,
2392 unsigned long vaddr,
2393 exynos_iova_t iova,
2394 size_t size, bool write,
2395 bool shareable)
2396 {
2397 struct exynos_iommu_owner *owner = dev->archdata.iommu;
2398 struct exynos_iovmm *vmm = owner->vmm_data;
2399 struct iommu_domain *domain = vmm->domain;
2400 struct exynos_iommu_domain *priv = domain->priv;
2401 exynos_iova_t iova_start = iova;
2402 struct vm_area_struct *vma;
2403 unsigned long start, end;
2404 unsigned long pgd_next;
2405 int ret = -EINVAL;
2406 bool is_pfnmap;
2407 pgd_t *pgd;
2408
2409 if (WARN_ON(size == 0))
2410 return 0;
2411
2412 down_read(&mm->mmap_sem);
2413
2414 /*
2415 * Assumes that the VMA is safe.
2416 * The caller must check the range of address space before calling this.
2417 */
2418 vma = find_vma(mm, vaddr);
2419 if (!vma) {
2420 pr_err("%s: vma is null\n", __func__);
2421 up_read(&mm->mmap_sem);
2422 return -EINVAL;
2423 }
2424
2425 if (vma->vm_end < (vaddr + size)) {
2426 pr_err("%s: vma overflow: %#lx--%#lx, vaddr: %#lx, size: %zd\n",
2427 __func__, vma->vm_start, vma->vm_end, vaddr, size);
2428 up_read(&mm->mmap_sem);
2429 return -EINVAL;
2430 }
2431
2432 is_pfnmap = vma->vm_flags & VM_PFNMAP;
2433
2434 start = vaddr & PAGE_MASK;
2435 end = PAGE_ALIGN(vaddr + size);
2436
2437 TRACE_LOG_DEV(dev, "%s: map @ %#lx--%#lx, %zd bytes, vm_flags: %#lx\n",
2438 __func__, start, end, size, vma->vm_flags);
2439
2440 pgd = pgd_offset(mm, start);
2441 do {
2442 unsigned long pmd_next;
2443 pmd_t *pmd;
2444
2445 if (pgd_none_or_clear_bad(pgd)) {
2446 ret = -EBADR;
2447 goto out_unmap;
2448 }
2449
2450 pgd_next = pgd_addr_end(start, end);
2451 pmd = pmd_offset((pud_t *)pgd, start);
2452
2453 do {
2454 pte_t *pte;
2455 sysmmu_pte_t *pent, *pent_first;
2456 sysmmu_pte_t *sent;
2457 spinlock_t *ptl;
2458
2459 if (pmd_none(*pmd)) {
2460 pmd = pmd_alloc(mm, (pud_t *)pgd, start);
2461 if (!pmd) {
2462 pr_err("%s: failed to alloc pmd\n",
2463 __func__);
2464 ret = -ENOMEM;
2465 goto out_unmap;
2466 }
2467
2468 if (__pte_alloc(mm, vma, pmd, start)) {
2469 pr_err("%s: failed to alloc pte\n",
2470 __func__);
2471 ret = -ENOMEM;
2472 goto out_unmap;
2473 }
2474 } else if (pmd_bad(*pmd)) {
2475 pr_err("%s: bad pmd value %#lx\n", __func__,
2476 (unsigned long)pmd_val(*pmd));
2477 pmd_clear_bad(pmd);
2478 ret = -EBADR;
2479 goto out_unmap;
2480 }
2481
2482 pmd_next = pmd_addr_end(start, pgd_next);
2483 pte = pte_offset_map(pmd, start);
2484
2485 sent = section_entry(priv->pgtable, iova);
2486 pent = alloc_lv2entry_fast(priv, sent, iova);
2487 if (IS_ERR(pent)) {
2488 ret = PTR_ERR(pent); /* ENOMEM or EADDRINUSE */
2489 goto out_unmap;
2490 }
2491
2492 pent_first = pent;
2493 ptl = pte_lockptr(mm, pmd);
2494
2495 spin_lock(ptl);
2496 do {
2497 WARN_ON(!lv2ent_fault(pent));
2498
2499 if (!pte_present(*pte) ||
2500 (write && !pte_write(*pte))) {
2501 if (pte_present(*pte) || pte_none(*pte)) {
2502 spin_unlock(ptl);
2503 ret = handle_pte_fault(mm,
2504 vma, start, pte, pmd,
2505 write ? FAULT_FLAG_WRITE : 0);
2506 if (IS_ERR_VALUE(ret)) {
2507 ret = -EIO;
2508 goto out_unmap;
2509 }
2510 spin_lock(ptl);
2511 }
2512 }
2513
2514 if (!pte_present(*pte) ||
2515 (write && !pte_write(*pte))) {
2516 ret = -EPERM;
2517 spin_unlock(ptl);
2518 goto out_unmap;
2519 }
2520
2521 if (!is_pfnmap)
2522 get_page(pte_page(*pte));
2523 *pent = mk_lv2ent_spage(__pfn_to_phys(
2524 pte_pfn(*pte)));
2525 if (shareable)
2526 set_lv2ent_shareable(pent);
2527
2528 if (lv2ent_offset(iova) == (NUM_LV2ENTRIES - 1)) {
2529 pgtable_flush(pent_first, pent + 1);
2530 iova += PAGE_SIZE;
2531 sent = section_entry(priv->pgtable, iova);
2532 pent = alloc_lv2entry_fast(priv, sent, iova);
2533 if (IS_ERR(pent)) {
2534 ret = PTR_ERR(pent);
2535 spin_unlock(ptl);
2536 goto out_unmap;
2537 }
2538 pent_first = pent;
2539 } else {
2540 iova += PAGE_SIZE;
2541 pent++;
2542 }
2543 } while (pte++, start += PAGE_SIZE, start < pmd_next);
2544
2545 if (pent_first != pent)
2546 pgtable_flush(pent_first, pent);
2547 spin_unlock(ptl);
2548 } while (pmd++, start = pmd_next, start != pgd_next);
2549
2550 } while (pgd++, start = pgd_next, start != end);
2551
2552 ret = 0;
2553 out_unmap:
2554 up_read(&mm->mmap_sem);
2555
2556 if (ret) {
2557 pr_debug("%s: Ignoring mapping for %#lx ~ %#lx\n",
2558 __func__, start, end);
2559 __sysmmu_unmap_user_pages(dev, mm, vaddr, iova_start,
2560 start - (vaddr & PAGE_MASK));
2561 }
2562
2563 return ret;
2564 }
2565
2566 int exynos_sysmmu_unmap_user_pages(struct device *dev,
2567 struct mm_struct *mm,
2568 unsigned long vaddr,
2569 exynos_iova_t iova,
2570 size_t size)
2571 {
2572 if (WARN_ON(size == 0))
2573 return 0;
2574
2575 return __sysmmu_unmap_user_pages(dev, mm, vaddr, iova, size);
2576 }
2577
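/*
 * Build IOVMM domains from the "samsung,exynos-iommu-bus" device tree nodes:
 * all master devices listed in a node's "domain-clients" property share a
 * single IOVM space created for that node.
 */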
2578 static int __init exynos_iommu_create_domain(void)
2579 {
2580 struct device_node *domain;
2581
2582 for_each_compatible_node(domain, NULL, "samsung,exynos-iommu-bus") {
2583 struct device_node *np;
2584 struct exynos_iovmm *vmm = NULL;
2585 int i = 0;
2586
2587 while ((np = of_parse_phandle(domain, "domain-clients", i++))) {
2588 struct platform_device *master =
2589 of_find_device_by_node(np);
2590 struct exynos_iommu_owner *owner;
2591 struct exynos_iommu_domain *priv;
2592
2593 if (!master) {
2594 pr_err("%s: master IP in '%s' not found\n",
2595 __func__, np->name);
2596 of_node_put(np);
2597 of_node_put(domain);
2598 return -ENOENT;
2599 }
2600
2601 owner = (struct exynos_iommu_owner *)
2602 master->dev.archdata.iommu;
2603 if (!owner) {
2604 pr_err("%s: No System MMU attached for %s\n",
2605 __func__, np->name);
2606 of_node_put(np);
2607 continue;
2608 }
2609
2610 if (!vmm) {
2611 vmm = exynos_create_single_iovmm(np->name);
2612 if (IS_ERR(vmm)) {
2613 pr_err("%s: Failed to create IOVM space of %s\n",
2614 __func__, np->name);
2616 of_node_put(np);
2617 of_node_put(domain);
2618 return -ENOMEM;
2619 }
2620 }
2621
2622 priv = (struct exynos_iommu_domain *)vmm->domain->priv;
2623
2624 owner->vmm_data = vmm;
2625 spin_lock(&priv->lock);
2626 list_add_tail(&owner->client, &priv->clients);
2627 spin_unlock(&priv->lock);
2628
2629 dev_info(&master->dev,
2630 "created IOVMM device node: %s\n", np->name);
2631
2632 of_node_put(np);
2633 }
2634 of_node_put(domain);
2635 }
2636 return 0;
2637 }
2638
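/*
 * Driver initialization: create the kmem cache for LV2 page tables, allocate
 * the global fault page, register the IOMMU ops on the platform bus, register
 * the System MMU platform driver and finally build the DT-described domains.
 */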
2639 static int __init exynos_iommu_init(void)
2640 {
2641 struct page *page;
2642 int ret = -ENOMEM;
2643
2644 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
2645 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
2646 if (!lv2table_kmem_cache) {
2647 pr_err("%s: failed to create kmem cache\n", __func__);
2648 return -ENOMEM;
2649 }
2650
2651 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2652 if (!page) {
2653 pr_err("%s: failed to allocate fault page\n", __func__);
2654 goto err_fault_page;
2655 }
2656 fault_page = page_to_phys(page);
2657
2658 ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
2659 if (ret) {
2660 pr_err("%s: Failed to register IOMMU ops\n", __func__);
2661 goto err_set_iommu;
2662 }
2663
2664 exynos_sysmmu_debugfs_root = debugfs_create_dir("sysmmu", NULL);
2665 if (!exynos_sysmmu_debugfs_root)
2666 pr_err("%s: Failed to create debugfs entry\n", __func__);
2667
2668 ret = platform_driver_register(&exynos_sysmmu_driver);
2669 if (ret) {
2670 pr_err("%s: Failed to register System MMU driver.\n", __func__);
2671 goto err_driver_register;
2672 }
2673
2674 ret = exynos_iommu_create_domain();
2675 if (ret && (ret != -ENOENT)) {
2676 pr_err("%s: Failed to create iommu domain\n", __func__);
2677 platform_driver_unregister(&exynos_sysmmu_driver);
2678 goto err_driver_register;
2679 }
2680
2681 return 0;
2682 err_driver_register:
2683 bus_set_iommu(&platform_bus_type, NULL);
2684 err_set_iommu:
2685 __free_page(page);
2686 err_fault_page:
2687 kmem_cache_destroy(lv2table_kmem_cache);
2688 return ret;
2689 }
2690 arch_initcall_sync(exynos_iommu_init);
2691
2692 #ifdef CONFIG_PM_SLEEP
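/*
 * System sleep hooks: on suspend, disable each active System MMU behind @dev
 * (unless runtime PM already powered it down) and mark it suspended so that
 * runtime callbacks skip it; on resume, re-enable it and clear the flag.
 */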
2693 static int sysmmu_pm_genpd_suspend(struct device *dev)
2694 {
2695 struct sysmmu_list_data *list;
2696 int ret;
2697
2698 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2699
2700 ret = pm_generic_suspend(dev);
2701 if (ret) {
2702 TRACE_LOG("<----- %s(%s) Failed\n", __func__, dev_name(dev));
2703 return ret;
2704 }
2705
2706 for_each_sysmmu_list(dev, list) {
2707 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2708 unsigned long flags;
2709 TRACE_LOG("Suspending %s...\n", dev_name(drvdata->sysmmu));
2710 spin_lock_irqsave(&drvdata->lock, flags);
2711 if (!drvdata->suspended && is_sysmmu_active(drvdata) &&
2712 (!pm_runtime_enabled(dev) ||
2713 is_sysmmu_runtime_active(drvdata)))
2714 __sysmmu_disable_nocount(drvdata);
2715 drvdata->suspended = true;
2716 spin_unlock_irqrestore(&drvdata->lock, flags);
2717 }
2718
2719 TRACE_LOG("<----- %s(%s)\n", __func__, dev_name(dev));
2720
2721 return 0;
2722 }
2723
2724 static int sysmmu_pm_genpd_resume(struct device *dev)
2725 {
2726 struct sysmmu_list_data *list;
2727 int ret;
2728
2729 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2730
2731 for_each_sysmmu_list(dev, list) {
2732 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2733 unsigned long flags;
2734 spin_lock_irqsave(&drvdata->lock, flags);
2735 if (drvdata->suspended && is_sysmmu_active(drvdata) &&
2736 (!pm_runtime_enabled(dev) ||
2737 is_sysmmu_runtime_active(drvdata)))
2738 __sysmmu_enable_nocount(drvdata);
2739 drvdata->suspended = false;
2740 spin_unlock_irqrestore(&drvdata->lock, flags);
2741 }
2742
2743 ret = pm_generic_resume(dev);
2744
2745 TRACE_LOG("<----- %s(%s) OK\n", __func__, dev_name(dev));
2746
2747 return ret;
2748 }
2749 #endif
2750
2751 #ifdef CONFIG_PM_RUNTIME
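/*
 * Runtime PM helpers used by the generic power domain save/restore callbacks:
 * sysmmu_save_state() drops the runtime-active reference and disables the
 * System MMUs behind @dev before its power domain is switched off, while
 * sysmmu_restore_state() takes the reference back and re-enables them after
 * power-on.
 */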
2752 static void sysmmu_restore_state(struct device *dev)
2753 {
2754 struct sysmmu_list_data *list;
2755
2756 for_each_sysmmu_list(dev, list) {
2757 struct sysmmu_drvdata *data = dev_get_drvdata(list->sysmmu);
2758 unsigned long flags;
2759
2760 TRACE_LOG("%s(%s)\n", __func__, dev_name(data->sysmmu));
2761
2762 SYSMMU_EVENT_LOG_POWERON(SYSMMU_DRVDATA_TO_LOG(data));
2763
2764 spin_lock_irqsave(&data->lock, flags);
2765 if (get_sysmmu_runtime_active(data) && is_sysmmu_active(data))
2766 __sysmmu_enable_nocount(data);
2767 spin_unlock_irqrestore(&data->lock, flags);
2768 }
2769 }
2770
2771 static void sysmmu_save_state(struct device *dev)
2772 {
2773 struct sysmmu_list_data *list;
2774
2775 for_each_sysmmu_list(dev, list) {
2776 struct sysmmu_drvdata *data = dev_get_drvdata(list->sysmmu);
2777 unsigned long flags;
2778
2779 TRACE_LOG("%s(%s)\n", __func__, dev_name(data->sysmmu));
2780
2781 SYSMMU_EVENT_LOG_POWEROFF(SYSMMU_DRVDATA_TO_LOG(data));
2782
2783 spin_lock_irqsave(&data->lock, flags);
2784 if (put_sysmmu_runtime_active(data) && is_sysmmu_active(data))
2785 __sysmmu_disable_nocount(data);
2786 spin_unlock_irqrestore(&data->lock, flags);
2787 }
2788 }
2789
2790 static int sysmmu_pm_genpd_save_state(struct device *dev)
2791 {
2792 int (*cb)(struct device *__dev);
2793 int ret = 0;
2794
2795 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2796
2797 if (dev->type && dev->type->pm)
2798 cb = dev->type->pm->runtime_suspend;
2799 else if (dev->class && dev->class->pm)
2800 cb = dev->class->pm->runtime_suspend;
2801 else if (dev->bus && dev->bus->pm)
2802 cb = dev->bus->pm->runtime_suspend;
2803 else
2804 cb = NULL;
2805
2806 if (!cb && dev->driver && dev->driver->pm)
2807 cb = dev->driver->pm->runtime_suspend;
2808
2809 if (cb)
2810 ret = cb(dev);
2811
2812 if (ret == 0)
2813 sysmmu_save_state(dev);
2814
2815 TRACE_LOG("<----- %s(%s) (cb = %pS) %s\n", __func__, dev_name(dev),
2816 cb, ret ? "Failed" : "OK");
2817
2818 return ret;
2819 }
2820
2821 static int sysmmu_pm_genpd_restore_state(struct device *dev)
2822 {
2823 int (*cb)(struct device *__dev);
2824 int ret = 0;
2825
2826 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2827
2828 if (dev->type && dev->type->pm)
2829 cb = dev->type->pm->runtime_resume;
2830 else if (dev->class && dev->class->pm)
2831 cb = dev->class->pm->runtime_resume;
2832 else if (dev->bus && dev->bus->pm)
2833 cb = dev->bus->pm->runtime_resume;
2834 else
2835 cb = NULL;
2836
2837 if (!cb && dev->driver && dev->driver->pm)
2838 cb = dev->driver->pm->runtime_resume;
2839
2840 sysmmu_restore_state(dev);
2841
2842 if (cb)
2843 ret = cb(dev);
2844
2845 if (ret)
2846 sysmmu_save_state(dev);
2847
2848 TRACE_LOG("<----- %s(%s) (cb = %pS) %s\n", __func__, dev_name(dev),
2849 cb, ret ? "Failed" : "OK");
2850
2851 return ret;
2852 }
2853 #endif
2854
2855 #ifdef CONFIG_PM_GENERIC_DOMAINS
2856 static struct gpd_dev_ops sysmmu_devpm_ops = {
2857 #ifdef CONFIG_PM_RUNTIME
2858 .save_state = &sysmmu_pm_genpd_save_state,
2859 .restore_state = &sysmmu_pm_genpd_restore_state,
2860 #endif
2861 #ifdef CONFIG_PM_SLEEP
2862 .suspend = &sysmmu_pm_genpd_suspend,
2863 .resume = &sysmmu_pm_genpd_resume,
2864 #endif
2865 };
2866
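/*
 * Platform bus notifier: when a driver binds to a device that owns a System
 * MMU, install the generic power domain callbacks (if the device has a PM
 * domain); once bound, keep the System MMUs permanently enabled for devices
 * not managed by runtime PM; remove the callbacks again when the driver is
 * unbound.
 */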
2867 static int sysmmu_hook_driver_register(struct notifier_block *nb,
2868 unsigned long val,
2869 void *p)
2870 {
2871 struct device *dev = p;
2872
2873 /*
2874 * No System MMU assigned. See exynos_sysmmu_probe().
2875 */
2876 if (dev->archdata.iommu == NULL)
2877 return 0;
2878
2879 switch (val) {
2880 case BUS_NOTIFY_BIND_DRIVER:
2881 {
2882 if (dev->pm_domain) {
2883 int ret = pm_genpd_add_callbacks(
2884 dev, &sysmmu_devpm_ops, NULL);
2885 if (ret && (ret != -ENOSYS)) {
2886 dev_err(dev,
2887 "Failed to register 'dev_pm_ops' for iommu\n");
2888 return ret;
2889 }
2890
2891 dev_info(dev, "exynos-iommu gpd_dev_ops inserted!\n");
2892 }
2893
2894 break;
2895 }
2896 case BUS_NOTIFY_BOUND_DRIVER:
2897 {
2898 struct sysmmu_list_data *list;
2899
2900 if (pm_runtime_enabled(dev) && dev->pm_domain)
2901 break;
2902
2903 for_each_sysmmu_list(dev, list) {
2904 struct sysmmu_drvdata *data =
2905 dev_get_drvdata(list->sysmmu);
2906 unsigned long flags;
2907 spin_lock_irqsave(&data->lock, flags);
2908 if (is_sysmmu_active(data) &&
2909 get_sysmmu_runtime_active(data))
2910 __sysmmu_enable_nocount(data);
2911 pm_runtime_disable(data->sysmmu);
2912 spin_unlock_irqrestore(&data->lock, flags);
2913 }
2914
2915 break;
2916 }
2917 case BUS_NOTIFY_UNBOUND_DRIVER:
2918 {
2919 struct exynos_iommu_owner *owner = dev->archdata.iommu;
2920 WARN_ON(!list_empty(&owner->client));
2921 __pm_genpd_remove_callbacks(dev, false);
2922 dev_info(dev, "exynos-iommu gpd_dev_ops removed!\n");
2923 break;
2924 }
2925 } /* switch (val) */
2926
2927 return 0;
2928 }
2929
2930 static struct notifier_block sysmmu_notifier = {
2931 .notifier_call = &sysmmu_hook_driver_register,
2932 };
2933
2934 static int __init exynos_iommu_prepare(void)
2935 {
2936 return bus_register_notifier(&platform_bus_type, &sysmmu_notifier);
2937 }
2938 subsys_initcall_sync(exynos_iommu_prepare);
2939 #endif /* CONFIG_PM_GENERIC_DOMAINS */
2940
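/*
 * Forward a System MMU fault to the fault handler registered by the owner
 * device via exynos_sysmmu_add_fault_notifier().
 */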
2941 static int sysmmu_fault_notifier(struct notifier_block *nb,
2942 unsigned long fault_addr, void *data)
2943 {
2944 struct exynos_iommu_owner *owner = NULL;
2945 struct exynos_iovmm *vmm;
2946
2947 owner = container_of(nb, struct exynos_iommu_owner, nb);
2948
2949 if (owner && owner->fault_handler) {
2950 vmm = exynos_get_iovmm(owner->dev);
2951 if (vmm && vmm->domain)
2952 owner->fault_handler(vmm->domain, owner->dev,
2953 fault_addr, (unsigned long)data,
2954 owner->token);
2955 }
2956
2957 return 0;
2958 }
2959
2960 int exynos_sysmmu_add_fault_notifier(struct device *dev,
2961 iommu_fault_handler_t handler, void *token)
2962 {
2963 struct exynos_iommu_owner *owner = dev->archdata.iommu;
2964 struct sysmmu_list_data *list;
2965 struct sysmmu_drvdata *drvdata;
2966 unsigned long flags;
2967 int ret;
2968
2969 if (!has_sysmmu(dev)) {
2970 dev_info(dev, "%s doesn't have sysmmu\n", dev_name(dev));
2971 return -EINVAL;
2972 }
2973
2974 spin_lock_irqsave(&owner->lock, flags);
2975
2976 owner->fault_handler = handler;
2977 owner->token = token;
2978 owner->nb.notifier_call = sysmmu_fault_notifier;
2979
2980 for_each_sysmmu_list(dev, list) {
2981 drvdata = dev_get_drvdata(list->sysmmu);
2982 ret = atomic_notifier_chain_register(
2983 &drvdata->fault_notifiers, &owner->nb);
2984 if (ret) {
2985 dev_err(dev,
2986 "Failed to register %s's fault notifier\n",
2987 dev_name(dev));
2988 goto err;
2989 }
2990
2991 }
2992
2993 spin_unlock_irqrestore(&owner->lock, flags);
2994
2995 return 0;
2996
2997 err:
2998 for_each_sysmmu_list(dev, list) {
2999 drvdata = dev_get_drvdata(list->sysmmu);
3000 atomic_notifier_chain_unregister(
3001 &drvdata->fault_notifiers, &owner->nb);
3002 }
3003 spin_unlock_irqrestore(&owner->lock, flags);
3004
3005 return ret;
3006 }
3007
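/* Dump the non-empty entries of one LV2 page table, four at a time. */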
3008 static void sysmmu_dump_lv2_page_table(unsigned int lv1idx, sysmmu_pte_t *base)
3009 {
3010 unsigned int i;
3011 for (i = 0; i < NUM_LV2ENTRIES; i += 4) {
3012 if (!base[i] && !base[i + 1] && !base[i + 2] && !base[i + 3])
3013 continue;
3014 pr_info(" LV2[%04d][%03d] %08x %08x %08x %08x\n",
3015 lv1idx, i,
3016 base[i], base[i + 1], base[i + 2], base[i + 3]);
3017 }
3018 }
3019
3020 static void sysmmu_dump_page_table(sysmmu_pte_t *base)
3021 {
3022 unsigned int i;
3023 phys_addr_t phys_base = virt_to_phys(base);
3024
3025 pr_info("---- System MMU Page Table @ %pa ----\n", &phys_base);
3026
3027 for (i = 0; i < NUM_LV1ENTRIES; i += 4) {
3028 unsigned int j;
3029 if (!base[i] && !base[i + 1] && !base[i + 2] && !base[i + 3])
3030 continue;
3031 pr_info("LV1[%04d] %08x %08x %08x %08x\n",
3032 i, base[i], base[i + 1], base[i + 2], base[i + 3]);
3033
3034 for (j = 0; j < 4; j++)
3035 if (lv1ent_page(&base[i + j]))
3036 sysmmu_dump_lv2_page_table(i + j,
3037 page_entry(&base[i + j], 0));
3038 }
3039 }
3040
3041 void exynos_sysmmu_show_status(struct device *dev)
3042 {
3043 struct sysmmu_list_data *list;
3044
3045 for_each_sysmmu_list(dev, list) {
3046 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
3047
3048 if (!is_sysmmu_active(drvdata) ||
3049 !is_sysmmu_runtime_active(drvdata)) {
3050 dev_info(drvdata->sysmmu,
3051 "%s: System MMU is not active\n", __func__);
3052 continue;
3053 }
3054
3055 pr_info("DUMPING SYSTEM MMU: %s\n", dev_name(drvdata->sysmmu));
3056
3057 __master_clk_enable(drvdata);
3058 if (sysmmu_block(drvdata->sfrbase))
3059 dump_sysmmu_tlb_pb(drvdata->sfrbase);
3060 else
3061 pr_err("!!Failed to block System MMU!\n");
3062 sysmmu_unblock(drvdata->sfrbase);
3063
3064 __master_clk_disable(drvdata);
3065 }
3066 }
3067
3068 void exynos_sysmmu_dump_pgtable(struct device *dev)
3069 {
3070 struct exynos_iommu_owner *owner = dev->archdata.iommu;
3071 struct sysmmu_list_data *list =
3072 list_first_entry(&owner->mmu_list, struct sysmmu_list_data, node);
3073 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
3074
3075 sysmmu_dump_page_table(phys_to_virt(drvdata->pgtable));
3076 }
3077
3078 void dump_sysmmu_ppc_cnt(struct sysmmu_drvdata *drvdata)
3079 {
3080 unsigned int cfg;
3081 int i;
3082
3083 pr_crit("------------- System MMU PPC Status --------------\n");
3084 for (i = 0; i < drvdata->event_cnt; i++) {
3085 cfg = __raw_readl(drvdata->sfrbase +
3086 REG_PPC_EVENT_SEL(i));
3087 pr_crit("%s %s %s CNT : %d\n", dev_name(drvdata->sysmmu),
3088 cfg & 0x10 ? "WRITE" : "READ",
3089 ppc_event_name[cfg & 0x7],
3090 __raw_readl(drvdata->sfrbase + REG_PPC_PMCNT(i)));
3091 }
3092 pr_crit("--------------------------------------------------\n");
3093 }
3094
3095 int sysmmu_set_ppc_event(struct sysmmu_drvdata *drvdata, int event)
3096 {
3097 unsigned int cfg;
3098
3099 if (event < 0 || event > TOTAL_ID_NUM ||
3100 event == READ_TLB_MISS || event == WRITE_TLB_MISS ||
3101 event == READ_FLPD_MISS_PREFETCH ||
3102 event == WRITE_FLPD_MISS_PREFETCH)
3103 return -EINVAL;
3104
3105 if (!drvdata->event_cnt)
3106 __raw_writel(0x1, drvdata->sfrbase + REG_PPC_PMNC);
3107
3108 __raw_writel(event, drvdata->sfrbase +
3109 REG_PPC_EVENT_SEL(drvdata->event_cnt));
3110 cfg = __raw_readl(drvdata->sfrbase +
3111 REG_PPC_CNTENS);
3112 __raw_writel(cfg | 0x1 << drvdata->event_cnt,
3113 drvdata->sfrbase + REG_PPC_CNTENS);
3114 return 0;
3115 }
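
/*
 * Dump the configured performance counters (PPC) of every System MMU
 * attached to @dev.
 */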
3116 void exynos_sysmmu_show_ppc_event(struct device *dev)
3117 {
3118 struct sysmmu_list_data *list;
3119 unsigned long flags;
3120
3121 for_each_sysmmu_list(dev, list) {
3122 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
3123
3124 spin_lock_irqsave(&drvdata->lock, flags);
3125 if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
3126 dev_info(drvdata->sysmmu,
3127 "%s: System MMU is not active\n", __func__);
3128 spin_unlock_irqrestore(&drvdata->lock, flags);
3129 continue;
3130 }
3131
3132 __master_clk_enable(drvdata);
3133 if (sysmmu_block(drvdata->sfrbase))
3134 dump_sysmmu_ppc_cnt(drvdata);
3135 else
3136 pr_err("!!Failed to block System MMU!\n");
3137 sysmmu_unblock(drvdata->sfrbase);
3138 __master_clk_disable(drvdata);
3139 spin_unlock_irqrestore(&drvdata->lock, flags);
3140 }
3141 }
3142
3143 void exynos_sysmmu_clear_ppc_event(struct device *dev)
3144 {
3145 struct sysmmu_list_data *list;
3146 unsigned long flags;
3147
3148 for_each_sysmmu_list(dev, list) {
3149 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
3150
3151 spin_lock_irqsave(&drvdata->lock, flags);
3152 if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
3153 dev_info(drvdata->sysmmu,
3154 "%s: System MMU is not active\n", __func__);
3155 spin_unlock_irqrestore(&drvdata->lock, flags);
3156 continue;
3157 }
3158
3159 __master_clk_enable(drvdata);
3160 if (sysmmu_block(drvdata->sfrbase)) {
3161 dump_sysmmu_ppc_cnt(drvdata);
3162 __raw_writel(0x2, drvdata->sfrbase + REG_PPC_PMNC);
3163 __raw_writel(0, drvdata->sfrbase + REG_PPC_CNTENS);
3164 __raw_writel(0, drvdata->sfrbase + REG_PPC_INTENS);
3165 drvdata->event_cnt = 0;
3166 } else
3167 pr_err("!!Failed to block System MMU!\n");
3168 sysmmu_unblock(drvdata->sfrbase);
3169 __master_clk_disable(drvdata);
3170
3171 spin_unlock_irqrestore(&drvdata->lock, flags);
3172 }
3173 }
3174
3175 int exynos_sysmmu_set_ppc_event(struct device *dev, int event)
3176 {
3177 struct sysmmu_list_data *list;
3178 unsigned long flags;
3179 int ret = 0;
3180
3181 for_each_sysmmu_list(dev, list) {
3182 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
3183
3184 spin_lock_irqsave(&drvdata->lock, flags);
3185 if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
3186 dev_info(drvdata->sysmmu,
3187 "%s: System MMU is not active\n", __func__);
3188 spin_unlock_irqrestore(&drvdata->lock, flags);
3189 continue;
3190 }
3191
3192 __master_clk_enable(drvdata);
3193 if (sysmmu_block(drvdata->sfrbase)) {
3194 if (drvdata->event_cnt < MAX_NUM_PPC) {
3195 ret = sysmmu_set_ppc_event(drvdata, event);
3196 if (ret)
3197 pr_err("Not supported Event ID (%d)",
3198 event);
3199 else
3200 drvdata->event_cnt++;
3201 }
3202 } else
3203 pr_err("!!Failed to block System MMU!\n");
3204 sysmmu_unblock(drvdata->sfrbase);
3205 __master_clk_disable(drvdata);
3206
3207 spin_unlock_irqrestore(&drvdata->lock, flags);
3208 }
3209
3210 return ret;
3211 }