ARM: Fix build after memfd_create syscall
LineageOS/android_kernel_samsung_universal7580.git: drivers/iommu/exynos-iommu.c
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/device.h>
#include <linux/clk-private.h>
#include <linux/pm_domain.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#if defined(CONFIG_SOC_EXYNOS5430)
#include <mach/regs-clock.h>
#endif

#include "exynos-iommu.h"

#define MAX_NUM_PPC 4

const char *ppc_event_name[] = {
	"TOTAL",
	"L1TLB MISS",
	"L2TLB MISS",
	"FLPD CACHE MISS",
	"PB LOOK-UP",
	"PB MISS",
	"BLOCK NUM BY PREFETCHING",
	"BLOCK CYCLE BY PREFETCHING",
	"TLB MISS",
	"FLPD MISS ON PREFETCHING",
};

static int iova_from_sent(sysmmu_pte_t *base, sysmmu_pte_t *sent)
{
	return ((unsigned long)sent - (unsigned long)base) *
				(SECT_SIZE / sizeof(sysmmu_pte_t));
}

struct sysmmu_list_data {
	struct device *sysmmu;
	struct list_head node; /* entry of exynos_iommu_owner.mmu_list */
};

#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
#define for_each_sysmmu_list(dev, sysmmu_list)				\
	list_for_each_entry(sysmmu_list,				\
		&((struct exynos_iommu_owner *)dev->archdata.iommu)->mmu_list,\
		node)

static struct exynos_iommu_owner *sysmmu_owner_list = NULL;
static struct sysmmu_drvdata *sysmmu_drvdata_list = NULL;

static struct kmem_cache *lv2table_kmem_cache;
static phys_addr_t fault_page;
sysmmu_pte_t *zero_lv2_table;
static struct dentry *exynos_sysmmu_debugfs_root;

#ifdef CONFIG_ARM
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
#else /* ARM64 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dma_sync_single_for_device(NULL,
		virt_to_phys(vastart),
		(size_t)(virt_to_phys(vaend) - virt_to_phys(vastart)),
		DMA_TO_DEVICE);
}
#endif
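
/*
 * Editorial note (not from the original sources): a minimal sketch of how
 * pgtable_flush() is used throughout this file. Page-table entries are first
 * written in normal memory and the touched range is then flushed, e.g.
 *
 *	*sent = mk_lv1ent_page(virt_to_phys(pent));
 *	pgtable_flush(sent, sent + 1);
 *
 * so that the System MMU's table walker, which fetches descriptors from
 * memory rather than from the CPU caches, observes the update.
 */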

void sysmmu_tlb_invalidate_flpdcache(struct device *dev, dma_addr_t iova)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		unsigned long flags;
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);
		if (is_sysmmu_active(drvdata) && drvdata->runtime_active) {
			TRACE_LOG_DEV(drvdata->sysmmu,
				"FLPD invalidation @ %#x\n", iova);
			__master_clk_enable(drvdata);
			__sysmmu_tlb_invalidate_flpdcache(
						drvdata->sfrbase, iova);
			SYSMMU_EVENT_LOG_FLPD_FLUSH(
					SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
			__master_clk_disable(drvdata);
		} else {
			TRACE_LOG_DEV(drvdata->sysmmu,
				"Skip FLPD invalidation @ %#x\n", iova);
		}
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}

static void sysmmu_tlb_invalidate_entry(struct device *dev, dma_addr_t iova,
					bool force)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		unsigned long flags;
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		if (!force && !(drvdata->prop & SYSMMU_PROP_NONBLOCK_TLBINV))
			continue;

		spin_lock_irqsave(&drvdata->lock, flags);
		if (is_sysmmu_active(drvdata) && drvdata->runtime_active) {
			TRACE_LOG_DEV(drvdata->sysmmu,
				"TLB invalidation @ %#x\n", iova);
			__master_clk_enable(drvdata);
			__sysmmu_tlb_invalidate_entry(drvdata->sfrbase, iova);
			SYSMMU_EVENT_LOG_TLB_INV_VPN(
					SYSMMU_DRVDATA_TO_LOG(drvdata), iova);
			__master_clk_disable(drvdata);
		} else {
			TRACE_LOG_DEV(drvdata->sysmmu,
				"Skip TLB invalidation @ %#x\n", iova);
		}
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}

void exynos_sysmmu_tlb_invalidate(struct iommu_domain *domain, dma_addr_t start,
				  size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	struct sysmmu_list_data *list;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(owner, &priv->clients, client) {
		for_each_sysmmu_list(owner->dev, list) {
			struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

			if (!!(drvdata->prop & SYSMMU_PROP_NONBLOCK_TLBINV))
				continue;

			spin_lock(&drvdata->lock);
			if (!is_sysmmu_active(drvdata) ||
					!is_sysmmu_runtime_active(drvdata)) {
				spin_unlock(&drvdata->lock);
				TRACE_LOG_DEV(drvdata->sysmmu,
					"Skip TLB invalidation %#x@%#x\n",
					size, start);
				continue;
			}

			TRACE_LOG_DEV(drvdata->sysmmu,
				"TLB invalidation %#x@%#x\n", size, start);

			__master_clk_enable(drvdata);

			__sysmmu_tlb_invalidate(drvdata, start, size);

			__master_clk_disable(drvdata);

			spin_unlock(&drvdata->lock);
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}

static inline void __sysmmu_disable_nocount(struct sysmmu_drvdata *drvdata)
{
	int disable = (drvdata->prop & SYSMMU_PROP_STOP_BLOCK) ?
					CTRL_BLOCK_DISABLE : CTRL_DISABLE;

#if defined(CONFIG_SOC_EXYNOS5430)
	if (!strcmp(dev_name(drvdata->sysmmu), "15200000.sysmmu")) {
		if (!(__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC0_SECURE_SMMU_MFC) & 0x1) ||
			!(__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC0_SECURE_SMMU_MFC) & 0x1)) {
			pr_err("MFC0_0 SYSMMU clock is disabled ACLK: [%#x], PCLK[%#x]\n",
				__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC0_SECURE_SMMU_MFC),
				__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC0_SECURE_SMMU_MFC));
			BUG();
		}
	} else if (!strcmp(dev_name(drvdata->sysmmu), "15210000.sysmmu")) {
		if (!(__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC0_SECURE_SMMU_MFC) & 0x2) ||
			!(__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC0_SECURE_SMMU_MFC) & 0x2)) {
			pr_err("MFC0_1 SYSMMU clock is disabled ACLK: [%#x], PCLK[%#x]\n",
				__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC0_SECURE_SMMU_MFC),
				__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC0_SECURE_SMMU_MFC));
			BUG();
		}
	} else if (!strcmp(dev_name(drvdata->sysmmu), "15300000.sysmmu")) {
		if (!(__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC1_SECURE_SMMU_MFC) & 0x1) ||
			!(__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC1_SECURE_SMMU_MFC) & 0x1)) {
			pr_err("MFC1_0 SYSMMU clock is disabled ACLK: [%#x], PCLK[%#x]\n",
				__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC1_SECURE_SMMU_MFC),
				__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC1_SECURE_SMMU_MFC));
			BUG();
		}
	} else if (!strcmp(dev_name(drvdata->sysmmu), "15310000.sysmmu")) {
		if (!(__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC1_SECURE_SMMU_MFC) & 0x2) ||
			!(__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC1_SECURE_SMMU_MFC) & 0x2)) {
			pr_err("MFC1_1 SYSMMU clock is disabled ACLK: [%#x], PCLK[%#x]\n",
				__raw_readl(EXYNOS5430_ENABLE_ACLK_MFC1_SECURE_SMMU_MFC),
				__raw_readl(EXYNOS5430_ENABLE_PCLK_MFC1_SECURE_SMMU_MFC));
			BUG();
		}
	}
#endif

	__raw_sysmmu_disable(drvdata->sfrbase, disable);

	__sysmmu_clk_disable(drvdata);
	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU_NO_MASTER_CLKGATE))
		__master_clk_disable(drvdata);

	SYSMMU_EVENT_LOG_DISABLE(SYSMMU_DRVDATA_TO_LOG(drvdata));

	TRACE_LOG("%s(%s)\n", __func__, dev_name(drvdata->sysmmu));
}

static bool __sysmmu_disable(struct sysmmu_drvdata *drvdata)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&drvdata->lock, flags);

	disabled = set_sysmmu_inactive(drvdata);

	if (disabled) {
		drvdata->pgtable = 0;
		drvdata->domain = NULL;

		if (drvdata->runtime_active) {
			__master_clk_enable(drvdata);
			__sysmmu_disable_nocount(drvdata);
			__master_clk_disable(drvdata);
		}

		TRACE_LOG_DEV(drvdata->sysmmu, "Disabled\n");
	} else {
		TRACE_LOG_DEV(drvdata->sysmmu, "%d times left to disable\n",
					drvdata->activations);
	}

	spin_unlock_irqrestore(&drvdata->lock, flags);

	return disabled;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *drvdata)
{
	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU_NO_MASTER_CLKGATE))
		__master_clk_enable(drvdata);

	__sysmmu_clk_enable(drvdata);

	__sysmmu_init_config(drvdata);

	__sysmmu_set_ptbase(drvdata->sfrbase, drvdata->pgtable / PAGE_SIZE);

	__raw_sysmmu_enable(drvdata->sfrbase);

	SYSMMU_EVENT_LOG_ENABLE(SYSMMU_DRVDATA_TO_LOG(drvdata));

	TRACE_LOG_DEV(drvdata->sysmmu, "Really enabled\n");
}

static int __sysmmu_enable(struct sysmmu_drvdata *drvdata,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&drvdata->lock, flags);
	if (set_sysmmu_active(drvdata)) {
		drvdata->pgtable = pgtable;
		drvdata->domain = domain;

		if (drvdata->runtime_active) {
			__master_clk_enable(drvdata);
			__sysmmu_enable_nocount(drvdata);
			__master_clk_disable(drvdata);
		}

		TRACE_LOG_DEV(drvdata->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == drvdata->pgtable) ? 1 : -EBUSY;

		TRACE_LOG_DEV(drvdata->sysmmu, "Already enabled (%d)\n", ret);
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(drvdata); /* decrement count */

	spin_unlock_irqrestore(&drvdata->lock, flags);

	return ret;
}

/* __exynos_sysmmu_enable: Enables System MMU
 *
 * Returns -error if an error occurred and the System MMU is not enabled,
 * 0 if the System MMU has just been enabled, and 1 if the System MMU was
 * already enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
		drvdata->master = dev;
		ret = __sysmmu_enable(drvdata, pgtable, domain);
		if (ret < 0) {
			struct sysmmu_list_data *iter;
			for_each_sysmmu_list(dev, iter) {
				if (iter == list)
					break;
				__sysmmu_disable(dev_get_drvdata(iter->sysmmu));
				drvdata->master = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}

int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
	int ret;

	BUG_ON(!memblock_is_memory(pgtable));

	ret = __exynos_sysmmu_enable(dev, pgtable, NULL);

	return ret;
}
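
/*
 * Editorial sketch (not part of the original driver): one way a hypothetical
 * master driver could consume the return convention documented above for
 * __exynos_sysmmu_enable()/exynos_sysmmu_enable(). 'example_enable_sysmmu'
 * and its arguments are placeholders.
 */
static int __maybe_unused example_enable_sysmmu(struct device *master,
						unsigned long pgtable_pa)
{
	int ret = exynos_sysmmu_enable(master, pgtable_pa);

	if (ret < 0)	/* error: the System MMU stays disabled */
		return ret;

	if (ret == 1)	/* already enabled with the same page table */
		dev_dbg(master, "System MMU was already enabled\n");

	return 0;	/* ret == 0: just enabled with 'pgtable_pa' */
}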

bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	/* Every call to __sysmmu_disable() must return the same result */
	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
		disabled = __sysmmu_disable(drvdata);
		if (disabled)
			drvdata->master = NULL;
	}

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}

#ifdef CONFIG_EXYNOS_IOMMU_RECOVER_FAULT_HANDLER
int recover_fault_handler(struct iommu_domain *domain,
				struct device *dev, unsigned long fault_addr,
				int itype, void *reserved)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	itype %= 16;

	if (itype == SYSMMU_PAGEFAULT) {
		struct exynos_iovmm *vmm_data;
		sysmmu_pte_t *sent;
		sysmmu_pte_t *pent;

		BUG_ON(priv->pgtable == NULL);

		spin_lock_irqsave(&priv->pgtablelock, flags);

		sent = section_entry(priv->pgtable, fault_addr);
		if (!lv1ent_page(sent)) {
			pent = kmem_cache_zalloc(lv2table_kmem_cache,
						GFP_ATOMIC);
			if (!pent)
				return -ENOMEM;

			*sent = mk_lv1ent_page(virt_to_phys(pent));
			pgtable_flush(sent, sent + 1);
		}
		pent = page_entry(sent, fault_addr);
		if (lv2ent_fault(pent)) {
			*pent = mk_lv2ent_spage(fault_page);
			pgtable_flush(pent, pent + 1);
		} else {
			pr_err("[%s] 0x%lx by '%s' is already mapped\n",
				sysmmu_fault_name[itype], fault_addr,
				dev_name(dev));
		}

		spin_unlock_irqrestore(&priv->pgtablelock, flags);

		owner = dev->archdata.iommu;
		vmm_data = (struct exynos_iovmm *)owner->vmm_data;
		if (find_iovm_region(vmm_data, fault_addr)) {
			pr_err("[%s] 0x%lx by '%s' is remapped\n",
				sysmmu_fault_name[itype],
				fault_addr, dev_name(dev));
		} else {
			pr_err("[%s] '%s' accessed unmapped address(0x%lx)\n",
				sysmmu_fault_name[itype], dev_name(dev),
				fault_addr);
		}
	} else if (itype == SYSMMU_L1TLB_MULTIHIT) {
		spin_lock_irqsave(&priv->lock, flags);
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_entry(owner->dev,
					(dma_addr_t)fault_addr, true);
		spin_unlock_irqrestore(&priv->lock, flags);

		pr_err("[%s] occurred at 0x%lx by '%s'\n",
			sysmmu_fault_name[itype], fault_addr, dev_name(dev));
	} else {
		return -ENOSYS;
	}

	return 0;
}
#else
int recover_fault_handler(struct iommu_domain *domain,
				struct device *dev, unsigned long fault_addr,
				int itype, void *reserved)
{
	return -ENOSYS;
}
#endif

/* called by exynos5-iommu.c and exynos7-iommu.c */
#define PB_CFG_MASK 0x11111
int __prepare_prefetch_buffers_by_plane(struct sysmmu_drvdata *drvdata,
				struct sysmmu_prefbuf prefbuf[], int num_pb,
				int inplanes, int onplanes,
				int ipoption, int opoption)
{
	int ret_num_pb = 0;
	int i = 0;
	struct exynos_iovmm *vmm;

	if (!drvdata->master || !drvdata->master->archdata.iommu) {
		dev_err(drvdata->sysmmu, "%s: No master device is specified\n",
			__func__);
		return 0;
	}

	vmm = ((struct exynos_iommu_owner *)
			(drvdata->master->archdata.iommu))->vmm_data;
	if (!vmm)
		return 0; /* No VMM information to set prefetch buffers */

	if (!inplanes && !onplanes) {
		inplanes = vmm->inplanes;
		onplanes = vmm->onplanes;
	}

	ipoption &= PB_CFG_MASK;
	opoption &= PB_CFG_MASK;

	if (drvdata->prop & SYSMMU_PROP_READ) {
		ret_num_pb = min(inplanes, num_pb);
		for (i = 0; i < ret_num_pb; i++) {
			prefbuf[i].base = vmm->iova_start[i];
			prefbuf[i].size = vmm->iovm_size[i];
			prefbuf[i].config = ipoption;
		}
	}

	if ((drvdata->prop & SYSMMU_PROP_WRITE) &&
			(ret_num_pb < num_pb) && (onplanes > 0)) {
		for (i = 0; i < min(num_pb - ret_num_pb, onplanes); i++) {
			prefbuf[ret_num_pb + i].base =
					vmm->iova_start[vmm->inplanes + i];
			prefbuf[ret_num_pb + i].size =
					vmm->iovm_size[vmm->inplanes + i];
			prefbuf[ret_num_pb + i].config = opoption;
		}

		ret_num_pb += i;
	}

	if (drvdata->prop & SYSMMU_PROP_WINDOW_MASK) {
		unsigned long prop = (drvdata->prop & SYSMMU_PROP_WINDOW_MASK)
						>> SYSMMU_PROP_WINDOW_SHIFT;
		BUG_ON(ret_num_pb != 0);
		for (i = 0; (i < (vmm->inplanes + vmm->onplanes)) &&
					(ret_num_pb < num_pb); i++) {
			if (prop & 1) {
				prefbuf[ret_num_pb].base = vmm->iova_start[i];
				prefbuf[ret_num_pb].size = vmm->iovm_size[i];
				prefbuf[ret_num_pb].config = ipoption;
				ret_num_pb++;
			}
			prop >>= 1;
			if (prop == 0)
				break;
		}
	}

	return ret_num_pb;
}

void sysmmu_set_prefetch_buffer_by_region(struct device *dev,
			struct sysmmu_prefbuf pb_reg[], unsigned int num_reg)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	if (!dev->archdata.iommu) {
		dev_err(dev, "%s: No System MMU is configured\n", __func__);
		return;
	}

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);

		if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
			spin_unlock(&drvdata->lock);
			continue;
		}

		__master_clk_enable(drvdata);

		if (sysmmu_block(drvdata->sfrbase)) {
			__exynos_sysmmu_set_prefbuf_by_region(drvdata,
							pb_reg, num_reg);
			sysmmu_unblock(drvdata->sfrbase);
		}

		__master_clk_disable(drvdata);

		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}

int sysmmu_set_prefetch_buffer_by_plane(struct device *dev,
			unsigned int inplanes, unsigned int onplanes,
			unsigned int ipoption, unsigned int opoption)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iovmm *vmm;
	struct sysmmu_list_data *list;
	unsigned long flags;

	if (!dev->archdata.iommu) {
		dev_err(dev, "%s: No System MMU is configured\n", __func__);
		return -EINVAL;
	}

	vmm = exynos_get_iovmm(dev);
	if (!vmm) {
		dev_err(dev, "%s: IOVMM is not configured\n", __func__);
		return -EINVAL;
	}

	if ((inplanes > vmm->inplanes) || (onplanes > vmm->onplanes)) {
		dev_err(dev, "%s: Given planes [%d, %d] exceed [%d, %d]\n",
				__func__, inplanes, onplanes,
				vmm->inplanes, vmm->onplanes);
		return -EINVAL;
	}

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);

		if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
			spin_unlock(&drvdata->lock);
			continue;
		}

		__master_clk_enable(drvdata);

		if (sysmmu_block(drvdata->sfrbase)) {
			__exynos_sysmmu_set_prefbuf_by_plane(drvdata,
					inplanes, onplanes, ipoption, opoption);
			sysmmu_unblock(drvdata->sfrbase);
		}

		__master_clk_disable(drvdata);

		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);

	return 0;
}

static void __sysmmu_set_ptwqos(struct sysmmu_drvdata *data)
{
	u32 cfg;

	if (!sysmmu_block(data->sfrbase))
		return;

	cfg = __raw_readl(data->sfrbase + REG_MMU_CFG);
	cfg &= ~CFG_QOS(15); /* clearing PTW_QOS field */

	/*
	 * PTW_QOS of System MMU 1.x ~ 3.x are all overridable
	 * in __sysmmu_init_config()
	 */
	if (__raw_sysmmu_version(data->sfrbase) < MAKE_MMU_VER(5, 0))
		cfg |= CFG_QOS(data->qos);
	else if (!(data->qos < 0))
		cfg |= CFG_QOS_OVRRIDE | CFG_QOS(data->qos);
	else
		cfg &= ~CFG_QOS_OVRRIDE;

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	sysmmu_unblock(data->sfrbase);
}

static void __sysmmu_set_qos(struct device *dev, unsigned int qosval)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *data;
		data = dev_get_drvdata(list->sysmmu);
		spin_lock(&data->lock);
		data->qos = qosval;
		if (is_sysmmu_really_enabled(data)) {
			__master_clk_enable(data);
			__sysmmu_set_ptwqos(data);
			__master_clk_disable(data);
		}
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}

void sysmmu_set_qos(struct device *dev, unsigned int qos)
{
	__sysmmu_set_qos(dev, (qos > 15) ? 15 : qos);
}

void sysmmu_reset_qos(struct device *dev)
{
	__sysmmu_set_qos(dev, DEFAULT_QOS_VALUE);
}

void exynos_sysmmu_set_df(struct device *dev, dma_addr_t iova)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;
	struct exynos_iovmm *vmm;
	int plane;

	BUG_ON(!has_sysmmu(dev));

	vmm = exynos_get_iovmm(dev);
	if (!vmm) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	plane = find_iovmm_plane(vmm, iova);
	if (plane < 0) {
		dev_err(dev, "%s: IOVA %pa is out of IOVMM\n", __func__, &iova);
		return;
	}

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);

		if (is_sysmmu_active(drvdata) && drvdata->runtime_active) {
			__master_clk_enable(drvdata);
			if (drvdata->prop & SYSMMU_PROP_WINDOW_MASK) {
				unsigned long prop;
				prop = drvdata->prop & SYSMMU_PROP_WINDOW_MASK;
				prop >>= SYSMMU_PROP_WINDOW_SHIFT;
				if (prop & (1 << plane))
					__exynos_sysmmu_set_df(drvdata, iova);
			} else {
				__exynos_sysmmu_set_df(drvdata, iova);
			}
			__master_clk_disable(drvdata);
		}
		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}

void exynos_sysmmu_release_df(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_list_data *list;
	unsigned long flags;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	for_each_sysmmu_list(dev, list) {
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock(&drvdata->lock);
		if (is_sysmmu_active(drvdata) && drvdata->runtime_active) {
			__master_clk_enable(drvdata);
			__exynos_sysmmu_release_df(drvdata);
			__master_clk_disable(drvdata);
		}
		spin_unlock(&drvdata->lock);
	}

	spin_unlock_irqrestore(&owner->lock, flags);
}

static int __init __sysmmu_init_clock(struct device *sysmmu,
					struct sysmmu_drvdata *drvdata)
{
	int ret;

	drvdata->clocks[SYSMMU_PCLK] = ERR_PTR(-ENOENT);
	drvdata->clocks[SYSMMU_MASTER] = ERR_PTR(-ENOENT);

	drvdata->clocks[SYSMMU_ACLK] = devm_clk_get(sysmmu, "sysmmu");
	if (IS_ERR(drvdata->clocks[SYSMMU_ACLK])) {
		if (PTR_ERR(drvdata->clocks[SYSMMU_ACLK]) == -ENOENT) {
			dev_info(sysmmu, "No gating clock found.\n");
			return 0;
		}

		dev_err(sysmmu, "Failed to get sysmmu clock\n");
		return PTR_ERR(drvdata->clocks[SYSMMU_ACLK]);
	}

	ret = clk_prepare(drvdata->clocks[SYSMMU_ACLK]);
	if (ret) {
		dev_err(sysmmu, "Failed to prepare sysmmu clock\n");
		return ret;
	}

	drvdata->clocks[SYSMMU_MASTER] = devm_clk_get(sysmmu, "master");
	if (PTR_ERR(drvdata->clocks[SYSMMU_MASTER]) == -ENOENT) {
		return 0;
	} else if (IS_ERR(drvdata->clocks[SYSMMU_MASTER])) {
		dev_err(sysmmu, "Failed to get master clock\n");
		clk_unprepare(drvdata->clocks[SYSMMU_ACLK]);
		return PTR_ERR(drvdata->clocks[SYSMMU_MASTER]);
	}

	ret = clk_prepare(drvdata->clocks[SYSMMU_MASTER]);
	if (ret) {
		clk_unprepare(drvdata->clocks[SYSMMU_ACLK]);
		dev_err(sysmmu, "Failed to prepare master clock\n");
		return ret;
	}

	return 0;
}

static int __init __sysmmu_init_master(struct device *dev)
{
	int ret;
	int i = 0;
	struct device_node *node;

	while ((node = of_parse_phandle(dev->of_node, "mmu-masters", i++))) {
		struct platform_device *master = of_find_device_by_node(node);
		struct exynos_iommu_owner *owner;
		struct sysmmu_list_data *list_data;

		if (!master) {
			dev_err(dev, "%s: mmu-master '%s' not found\n",
				__func__, node->name);
			ret = -EINVAL;
			goto err;
		}

		owner = master->dev.archdata.iommu;
		if (!owner) {
			owner = devm_kzalloc(dev, sizeof(*owner), GFP_KERNEL);
			if (!owner) {
				dev_err(dev,
				"%s: Failed to allocate owner structure\n",
				__func__);
				ret = -ENOMEM;
				goto err;
			}

			INIT_LIST_HEAD(&owner->mmu_list);
			INIT_LIST_HEAD(&owner->client);
			owner->dev = &master->dev;
			spin_lock_init(&owner->lock);

			master->dev.archdata.iommu = owner;
			if (!sysmmu_owner_list) {
				sysmmu_owner_list = owner;
			} else {
				owner->next = sysmmu_owner_list->next;
				sysmmu_owner_list->next = owner;
			}
		}

		list_data = devm_kzalloc(dev, sizeof(*list_data), GFP_KERNEL);
		if (!list_data) {
			dev_err(dev,
				"%s: Failed to allocate sysmmu_list_data\n",
				__func__);
			ret = -ENOMEM;
			goto err;
		}

		INIT_LIST_HEAD(&list_data->node);
		list_data->sysmmu = dev;

		/*
		 * System MMUs are attached in the order of their presence
		 * in the device tree.
		 */
		list_add_tail(&list_data->node, &owner->mmu_list);
		dev_info(dev, "--> %s\n", dev_name(&master->dev));
	}

	return 0;
err:
	while ((node = of_parse_phandle(dev->of_node, "mmu-masters", i++))) {
		struct platform_device *master = of_find_device_by_node(node);
		struct exynos_iommu_owner *owner;
		struct sysmmu_list_data *list_data;

		if (!master)
			continue;

		owner = master->dev.archdata.iommu;
		if (!owner)
			continue;

		list_for_each_entry(list_data, &owner->mmu_list, node) {
			if (list_data->sysmmu == dev) {
				list_del(&list_data->node);
				kfree(list_data);
				break;
			}
		}
	}

	return ret;
}

static const char * const sysmmu_prop_opts[] = {
	[SYSMMU_PROP_RESERVED]	= "Reserved",
	[SYSMMU_PROP_READ]	= "r",
	[SYSMMU_PROP_WRITE]	= "w",
	[SYSMMU_PROP_READWRITE]	= "rw",	/* default */
};

static int __init __sysmmu_init_prop(struct device *sysmmu,
				     struct sysmmu_drvdata *drvdata)
{
	struct device_node *prop_node;
	const char *s;
	int winmap = 0;
	unsigned int qos = DEFAULT_QOS_VALUE;
	int ret;

	drvdata->prop = SYSMMU_PROP_READWRITE;

	ret = of_property_read_u32_index(sysmmu->of_node, "qos", 0, &qos);

	if ((ret == 0) && (qos > 15)) {
		dev_err(sysmmu, "%s: Invalid QoS value %d specified\n",
			__func__, qos);
		qos = DEFAULT_QOS_VALUE;
	}

	drvdata->qos = (short)qos;

	/*
	 * The 'prop-map' child node of System MMU device nodes in the FDT is
	 * deprecated: a separate child node is not required for boolean
	 * properties like 'block-stop' and 'tlbinv-nonblock'.
	 * 'tlbinv-nonblock' is a hardware workaround that accelerates master
	 * H/W performance on System MMU 5.x and earlier versions;
	 * 'sysmmu,tlbinv-nonblock' is introduced instead for those versions.
	 * Instead of 'block-stop' in the 'prop-map' child node,
	 * 'sysmmu,block-when-stop' without a value is introduced to simplify
	 * the FDT node definitions.
	 * Likewise, prop-map.iomap and prop-map.winmap are replaced with
	 * sysmmu,pb-iomap and sysmmu,pb-winmap, respectively.
	 * For compatibility with existing FDT files, parsing of the
	 * 'prop-map' child node is still kept.
	 */
	prop_node = of_get_child_by_name(sysmmu->of_node, "prop-map");
	if (prop_node) {
		if (!of_property_read_string(prop_node, "iomap", &s)) {
			int val;
			for (val = 1; val < ARRAY_SIZE(sysmmu_prop_opts);
					val++) {
				if (!strcasecmp(s, sysmmu_prop_opts[val])) {
					drvdata->prop &= ~SYSMMU_PROP_RW_MASK;
					drvdata->prop |= val;
					break;
				}
			}
		} else if (!of_property_read_u32_index(
					prop_node, "winmap", 0, &winmap)) {
			if (winmap) {
				drvdata->prop &= ~SYSMMU_PROP_RW_MASK;
				drvdata->prop |=
					winmap << SYSMMU_PROP_WINDOW_SHIFT;
			}
		}

		if (!of_property_read_string(prop_node, "tlbinv-nonblock", &s))
			if (strnicmp(s, "yes", 3) == 0)
				drvdata->prop |= SYSMMU_PROP_NONBLOCK_TLBINV;

		if (!of_property_read_string(prop_node, "block-stop", &s))
			if (strnicmp(s, "yes", 3) == 0)
				drvdata->prop |= SYSMMU_PROP_STOP_BLOCK;

		of_node_put(prop_node);
	}

	if (!of_property_read_string(sysmmu->of_node, "sysmmu,pb-iomap", &s)) {
		int val;
		for (val = 1; val < ARRAY_SIZE(sysmmu_prop_opts); val++) {
			if (!strcasecmp(s, sysmmu_prop_opts[val])) {
				drvdata->prop &= ~SYSMMU_PROP_RW_MASK;
				drvdata->prop |= val;
				break;
			}
		}
	} else if (!of_property_read_u32_index(
			sysmmu->of_node, "sysmmu,pb-winmap", 0, &winmap)) {
		if (winmap) {
			drvdata->prop &= ~SYSMMU_PROP_RW_MASK;
			drvdata->prop |= winmap << SYSMMU_PROP_WINDOW_SHIFT;
		}
	}

	if (of_find_property(sysmmu->of_node, "sysmmu,block-when-stop", NULL))
		drvdata->prop |= SYSMMU_PROP_STOP_BLOCK;

	if (of_find_property(sysmmu->of_node, "sysmmu,tlbinv-nonblock", NULL))
		drvdata->prop |= SYSMMU_PROP_NONBLOCK_TLBINV;

	return 0;
}
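
/*
 * Editorial illustration (not from the original sources): a device-tree
 * fragment using the properties parsed by __sysmmu_init_prop() and
 * __sysmmu_init_master(). The node name, addresses and the compatible string
 * (whatever SYSMMU_OF_COMPAT_STRING expands to) are placeholders.
 *
 *	sysmmu_example: sysmmu@12345000 {
 *		compatible = "...";		(SYSMMU_OF_COMPAT_STRING)
 *		reg = <0x12345000 0x1000>;
 *		interrupts = <...>;
 *		qos = <8>;
 *		sysmmu,pb-iomap = "rw";
 *		sysmmu,block-when-stop;
 *		sysmmu,tlbinv-nonblock;
 *		mmu-masters = <&example_master>;
 *	};
 */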

static int __init __sysmmu_setup(struct device *sysmmu,
				 struct sysmmu_drvdata *drvdata)
{
	int ret;

	ret = __sysmmu_init_prop(sysmmu, drvdata);
	if (ret) {
		dev_err(sysmmu, "Failed to initialize sysmmu properties\n");
		return ret;
	}

	ret = __sysmmu_init_clock(sysmmu, drvdata);
	if (ret) {
		dev_err(sysmmu, "Failed to initialize gating clocks\n");
		return ret;
	}

	ret = __sysmmu_init_master(sysmmu);
	if (ret) {
		if (!IS_ERR(drvdata->clocks[SYSMMU_ACLK]))
			clk_unprepare(drvdata->clocks[SYSMMU_ACLK]);
		if (!IS_ERR(drvdata->clocks[SYSMMU_MASTER]))
			clk_unprepare(drvdata->clocks[SYSMMU_MASTER]);
		dev_err(sysmmu, "Failed to initialize master device.\n");
	}

	return ret;
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_err(dev, "Not enough memory\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Unable to find IOMEM region\n");
		return -ENOENT;
	}

	data->sfrbase = devm_request_and_ioremap(dev, res);
	if (!data->sfrbase) {
		dev_err(dev, "Unable to map IOMEM @ PA:%pa\n", &res->start);
		return -EBUSY;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return ret;
	}

	ret = devm_request_irq(dev, ret, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register interrupt handler\n");
		return ret;
	}

	pm_runtime_enable(dev);

	ret = exynos_iommu_init_event_log(SYSMMU_DRVDATA_TO_LOG(data),
					  SYSMMU_LOG_LEN);
	if (!ret)
		sysmmu_add_log_to_debugfs(exynos_sysmmu_debugfs_root,
				SYSMMU_DRVDATA_TO_LOG(data), dev_name(dev));
	else
		return ret;

	ret = __sysmmu_setup(dev, data);
	if (!ret) {
		data->runtime_active = !pm_runtime_enabled(dev);
		data->sysmmu = dev;
		spin_lock_init(&data->lock);
		if (!sysmmu_drvdata_list) {
			sysmmu_drvdata_list = data;
		} else {
			data->next = sysmmu_drvdata_list->next;
			sysmmu_drvdata_list->next = data;
		}

		platform_set_drvdata(pdev, data);

		dev_info(dev, "[OK]\n");
	}

	return ret;
}

#ifdef CONFIG_OF
static struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = SYSMMU_OF_COMPAT_STRING, },
	{ },
};
#endif

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= MODULE_NAME,
		.of_match_table	= of_match_ptr(sysmmu_of_match),
	}
};

static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;
	int i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	if (exynos_iommu_init_event_log(IOMMU_PRIV_TO_LOG(priv), IOMMU_LOG_LEN))
		goto err_init_event_log;

	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		priv->pgtable[i + 0] = ZERO_LV2LINK;
		priv->pgtable[i + 1] = ZERO_LV2LINK;
		priv->pgtable[i + 2] = ZERO_LV2LINK;
		priv->pgtable[i + 3] = ZERO_LV2LINK;
		priv->pgtable[i + 4] = ZERO_LV2LINK;
		priv->pgtable[i + 5] = ZERO_LV2LINK;
		priv->pgtable[i + 6] = ZERO_LV2LINK;
		priv->pgtable[i + 7] = ZERO_LV2LINK;
	}

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->priv = priv;
	domain->handler = recover_fault_handler;

	return 0;

err_init_event_log:
	free_pages((unsigned long)priv->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}

static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	exynos_iommu_free_event_log(IOMMU_PRIV_TO_LOG(priv), IOMMU_LOG_LEN);

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}

static int exynos_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pgtable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, virt_to_phys(priv->pgtable), domain);

	if (ret == 0)
		list_add_tail(&owner->client, &priv->clients);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pgtable);
	} else {
		SYSMMU_EVENT_LOG_IOMMU_ATTACH(IOMMU_PRIV_TO_LOG(priv), dev);
		TRACE_LOG_DEV(dev,
			"%s: Attached new IOMMU with pgtable %pa %s\n",
			__func__, &pgtable, (ret == 0) ? "" : ", again");
	}

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev))
				list_del_init(&owner->client);
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu) {
		SYSMMU_EVENT_LOG_IOMMU_DETACH(IOMMU_PRIV_TO_LOG(priv), dev);
		TRACE_LOG_DEV(dev, "%s: Detached IOMMU with pgtable %#lx\n",
					__func__, virt_to_phys(priv->pgtable));
	} else {
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
	}
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *priv,
		sysmmu_pte_t *sent, unsigned long iova, short *pgcounter)
{
	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		struct exynos_iommu_owner *owner;
		unsigned long flags;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
		SYSMMU_EVENT_LOG_IOMMU_ALLOCSLPD(IOMMU_PRIV_TO_LOG(priv),
						 iova & SECT_MASK);

		/*
		 * If the prefetched SLPD is a fault SLPD in zero_l2_table,
		 * the FLPD cache caches the address of zero_l2_table. This
		 * function replaces the zero_l2_table with a new L2 page
		 * table to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve FLPD cache invalidation if the
		 * System MMU has the prefetch feature and an FLPD cache
		 * (version 3.3).
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		spin_lock_irqsave(&priv->lock, flags);
		list_for_each_entry(owner, &priv->clients, client)
			sysmmu_tlb_invalidate_flpdcache(owner->dev, iova);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else if (!lv1ent_page(sent)) {
		BUG();
		return ERR_PTR(-EADDRINUSE);
	}

	return page_entry(sent, iova);
}

static int lv1ent_check_page(struct exynos_iommu_domain *priv,
			     sysmmu_pte_t *sent, short *pgcnt)
{
	if (lv1ent_page(sent)) {
		if (WARN_ON(*pgcnt != NUM_LV2ENTRIES))
			return -EADDRINUSE;

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));

		*pgcnt = 0;

		SYSMMU_EVENT_LOG_IOMMU_FREESLPD(IOMMU_PRIV_TO_LOG(priv),
					iova_from_sent(priv->pgtable, sent));
	}

	return 0;
}

static void clear_lv1_page_table(sysmmu_pte_t *ent, int n)
{
	int i;
	for (i = 0; i < n; i++)
		ent[i] = ZERO_LV2LINK;
}

static void clear_lv2_page_table(sysmmu_pte_t *ent, int n)
{
	if (n > 0)
		memset(ent, 0, sizeof(*ent) * n);
}

static int lv1set_section(struct exynos_iommu_domain *priv,
			  sysmmu_pte_t *sent, phys_addr_t paddr,
			  size_t size, short *pgcnt)
{
	int ret;

	if (WARN_ON(!lv1ent_fault(sent) && !lv1ent_page(sent)))
		return -EADDRINUSE;

	if (size == SECT_SIZE) {
		ret = lv1ent_check_page(priv, sent, pgcnt);
		if (ret)
			return ret;
		*sent = mk_lv1ent_sect(paddr);
		pgtable_flush(sent, sent + 1);
	} else if (size == DSECT_SIZE) {
		int i;
		for (i = 0; i < SECT_PER_DSECT; i++, sent++, pgcnt++) {
			ret = lv1ent_check_page(priv, sent, pgcnt);
			if (ret) {
				clear_lv1_page_table(sent - i, i);
				return ret;
			}
			*sent = mk_lv1ent_dsect(paddr);
		}
		pgtable_flush(sent - SECT_PER_DSECT, sent);
	} else {
		int i;
		for (i = 0; i < SECT_PER_SPSECT; i++, sent++, pgcnt++) {
			ret = lv1ent_check_page(priv, sent, pgcnt);
			if (ret) {
				clear_lv1_page_table(sent - i, i);
				return ret;
			}
			*sent = mk_lv1ent_spsect(paddr);
		}
		pgtable_flush(sent - SECT_PER_SPSECT, sent);
	}

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr,
		       size_t size, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				clear_lv2_page_table(pent - i, i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size >= SECT_SIZE) {
		int num_entry = size / SECT_SIZE;
		struct exynos_iommu_owner *owner;

		ret = lv1set_section(priv, entry, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		spin_lock(&priv->lock);
		list_for_each_entry(owner, &priv->clients, client) {
			int i;
			for (i = 0; i < num_entry; i++)
				sysmmu_tlb_invalidate_flpdcache(owner->dev,
							iova + i * SECT_SIZE);
		}
		spin_unlock(&priv->lock);

		SYSMMU_EVENT_LOG_IOMMU_MAP(IOMMU_PRIV_TO_LOG(priv),
				iova, iova + size, paddr / SPAGE_SIZE);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(priv, entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent)) {
			ret = PTR_ERR(pent);
		} else {
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

			SYSMMU_EVENT_LOG_IOMMU_MAP(IOMMU_PRIV_TO_LOG(priv),
					iova, iova + size, paddr / SPAGE_SIZE);
		}
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %pa\n",
			__func__, ret, size, &iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
					      unsigned long iova)
{
	struct exynos_iommu_owner *owner;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, false);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	size_t err_pgsize;
	sysmmu_pte_t *ent;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_spsection(ent)) {
		if (WARN_ON(size < SPSECT_SIZE)) {
			err_pgsize = SPSECT_SIZE;
			goto err;
		}

		clear_lv1_page_table(ent, SECT_PER_SPSECT);

		pgtable_flush(ent, ent + SECT_PER_SPSECT);
		size = SPSECT_SIZE;
		goto done;
	}

	if (lv1ent_dsection(ent)) {
		if (WARN_ON(size < DSECT_SIZE)) {
			err_pgsize = DSECT_SIZE;
			goto err;
		}

		*ent = ZERO_LV2LINK;
		*(++ent) = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 2);
		size = DSECT_SIZE;
		goto done;
	}

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		*ent = ZERO_LV2LINK;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv1ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	clear_lv2_page_table(ent, SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	SYSMMU_EVENT_LOG_IOMMU_UNMAP(IOMMU_PRIV_TO_LOG(priv),
				     iova, iova + size);

	exynos_iommu_tlb_invalidate_entry(priv, iova);

	/* TLB invalidation is performed by IOVMM */
	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %pa is smaller than page size %#zx\n",
		__func__, size, &iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	unsigned long flags;
	sysmmu_pte_t *entry;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_spsection(entry)) {
		phys = spsection_phys(entry) + spsection_offs(iova);
	} else if (lv1ent_dsection(entry)) {
		phys = dsection_phys(entry) + dsection_offs(iova);
	} else if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}

static struct iommu_ops exynos_iommu_ops = {
	.domain_init = &exynos_iommu_domain_init,
	.domain_destroy = &exynos_iommu_domain_destroy,
	.attach_dev = &exynos_iommu_attach_device,
	.detach_dev = &exynos_iommu_detach_device,
	.map = &exynos_iommu_map,
	.unmap = &exynos_iommu_unmap,
	.iova_to_phys = &exynos_iommu_iova_to_phys,
	.pgsize_bitmap = PGSIZE_BITMAP,
};
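
/*
 * Editorial sketch (not from the original sources): these callbacks are
 * reached through the generic IOMMU API of this kernel generation once
 * exynos_iommu_init() below has registered them for the platform bus,
 * roughly:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(dom, master_dev);	// -> exynos_iommu_attach_device()
 *	iommu_map(dom, iova, paddr, SZ_4K,	// -> exynos_iommu_map()
 *		  IOMMU_READ | IOMMU_WRITE);
 *
 * 'master_dev', 'iova' and 'paddr' are placeholders supplied by the caller.
 */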

static int __sysmmu_unmap_user_pages(struct device *dev,
					struct mm_struct *mm,
					unsigned long vaddr,
					exynos_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iovmm *vmm = owner->vmm_data;
	struct iommu_domain *domain = vmm->domain;
	struct exynos_iommu_domain *priv = domain->priv;
	struct vm_area_struct *vma;
	unsigned long start = vaddr & PAGE_MASK;
	unsigned long end = PAGE_ALIGN(vaddr + size);
	bool is_pfnmap;
	sysmmu_pte_t *sent, *pent;
	int ret = 0;

	down_read(&mm->mmap_sem);

	BUG_ON((vaddr + size) < vaddr);
	/*
	 * Assumes that the VMA is safe.
	 * The caller must check the range of address space before calling this.
	 */
	vma = find_vma(mm, vaddr);
	if (!vma) {
		pr_err("%s: vma is null\n", __func__);
		ret = -EINVAL;
		goto out_unmap;
	}

	if (vma->vm_end < (vaddr + size)) {
		pr_err("%s: vma overflow: %#lx--%#lx, vaddr: %#lx, size: %zd\n",
			__func__, vma->vm_start, vma->vm_end, vaddr, size);
		ret = -EINVAL;
		goto out_unmap;
	}

	is_pfnmap = vma->vm_flags & VM_PFNMAP;

	TRACE_LOG_DEV(dev, "%s: unmap starts @ %#zx@%#lx\n",
			__func__, size, start);

	do {
		sysmmu_pte_t *pent_first;

		sent = section_entry(priv->pgtable, iova);
		if (lv1ent_fault(sent)) {
			ret = -EFAULT;
			goto out_unmap;
		}

		pent = page_entry(sent, iova);
		if (lv2ent_fault(pent)) {
			ret = -EFAULT;
			goto out_unmap;
		}

		pent_first = pent;

		do {
			if (!lv2ent_fault(pent) && !is_pfnmap)
				put_page(phys_to_page(spage_phys(pent)));

			*pent = 0;
			if (lv2ent_offset(iova) == NUM_LV2ENTRIES - 1) {
				pgtable_flush(pent_first, pent);
				iova += PAGE_SIZE;
				sent = section_entry(priv->pgtable, iova);
				if (lv1ent_fault(sent)) {
					ret = -EFAULT;
					goto out_unmap;
				}

				pent = page_entry(sent, iova);
				if (lv2ent_fault(pent)) {
					ret = -EFAULT;
					goto out_unmap;
				}

				pent_first = pent;
			} else {
				iova += PAGE_SIZE;
				pent++;
			}
		} while (start += PAGE_SIZE, start != end);

		if (pent_first != pent)
			pgtable_flush(pent_first, pent);
	} while (start != end);

	TRACE_LOG_DEV(dev, "%s: unmap done @ %#lx\n", __func__, start);

out_unmap:
	up_read(&mm->mmap_sem);

	if (ret) {
		pr_debug("%s: Ignoring unmapping for %#lx ~ %#lx\n",
						__func__, start, end);
	}

	return ret;
}

static sysmmu_pte_t *alloc_lv2entry_fast(struct exynos_iommu_domain *priv,
				sysmmu_pte_t *sent, unsigned long iova)
{
	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		kmemleak_ignore(pent);
		pgtable_flush(sent, sent + 1);
	} else if (WARN_ON(!lv1ent_page(sent))) {
		return ERR_PTR(-EADDRINUSE);
	}

	return page_entry(sent, iova);
}
1756
1757int exynos_sysmmu_map_user_pages(struct device *dev,
1758 struct mm_struct *mm,
1759 unsigned long vaddr,
1760 exynos_iova_t iova,
1761 size_t size, bool write,
1762 bool shareable)
1763{
1764 struct exynos_iommu_owner *owner = dev->archdata.iommu;
1765 struct exynos_iovmm *vmm = owner->vmm_data;
1766 struct iommu_domain *domain = vmm->domain;
1767 struct exynos_iommu_domain *priv = domain->priv;
1768 exynos_iova_t iova_start = iova;
1769 struct vm_area_struct *vma;
1770 unsigned long start, end;
1771 unsigned long pgd_next;
1772 int ret = -EINVAL;
1773 bool is_pfnmap;
1774 pgd_t *pgd;
1775
1776 if (WARN_ON(size == 0))
1777 return 0;
1778
1779 down_read(&mm->mmap_sem);
1780
1781 /*
1782 * Assumes that the VMA is safe.
1783 * The caller must check the range of address space before calling this.
1784 */
1785 vma = find_vma(mm, vaddr);
1786 if (!vma) {
1787 pr_err("%s: vma is null\n", __func__);
1788 up_read(&mm->mmap_sem);
1789 return -EINVAL;
1790 }
1791
1792 if (vma->vm_end < (vaddr + size)) {
1793 pr_err("%s: vma overflow: %#lx--%#lx, vaddr: %#lx, size: %zd\n",
1794 __func__, vma->vm_start, vma->vm_end, vaddr, size);
1795 up_read(&mm->mmap_sem);
1796 return -EINVAL;
1797 }
1798
1799 is_pfnmap = vma->vm_flags & VM_PFNMAP;
1800
1801 start = vaddr & PAGE_MASK;
1802 end = PAGE_ALIGN(vaddr + size);
1803
1804 TRACE_LOG_DEV(dev, "%s: map @ %#lx--%#lx, %zd bytes, vm_flags: %#lx\n",
1805 __func__, start, end, size, vma->vm_flags);
1806
1807 pgd = pgd_offset(mm, start);
1808 do {
1809 unsigned long pmd_next;
1810 pmd_t *pmd;
1811
1812 if (pgd_none_or_clear_bad(pgd)) {
1813 ret = -EBADR;
1814 goto out_unmap;
1815 }
1816
1817 pgd_next = pgd_addr_end(start, end);
1818 pmd = pmd_offset((pud_t *)pgd, start);
1819
1820 do {
1821 pte_t *pte;
1822 sysmmu_pte_t *pent, *pent_first;
1823 sysmmu_pte_t *sent;
1824 spinlock_t *ptl;
1825
1826 if (pmd_none(*pmd)) {
1827 pmd = pmd_alloc(mm, (pud_t *)pgd, start);
1828 if (!pmd) {
1829 pr_err("%s: failed to alloc pmd\n",
1830 __func__);
1831 ret = -ENOMEM;
1832 goto out_unmap;
1833 }
1834
1835 if (__pte_alloc(mm, vma, pmd, start)) {
1836 pr_err("%s: failed to alloc pte\n",
1837 __func__);
1838 ret = -ENOMEM;
1839 goto out_unmap;
1840 }
1841 } else if (pmd_bad(*pmd)) {
1842 pr_err("%s: bad pmd value %#lx\n", __func__,
1843 (unsigned long)pmd_val(*pmd));
1844 pmd_clear_bad(pmd);
1845 ret = -EBADR;
1846 goto out_unmap;
1847 }
1848
1849 pmd_next = pmd_addr_end(start, pgd_next);
1850 pte = pte_offset_map(pmd, start);
1851
1852 sent = section_entry(priv->pgtable, iova);
1853 pent = alloc_lv2entry_fast(priv, sent, iova);
1854 if (IS_ERR(pent)) {
1855 ret = PTR_ERR(pent); /* ENOMEM or EADDRINUSE */
1856 goto out_unmap;
1857 }
1858
1859 pent_first = pent;
1860 ptl = pte_lockptr(mm, pmd);
1861
1862 spin_lock(ptl);
1863 do {
1864 WARN_ON(!lv2ent_fault(pent));
1865
1866 if (!pte_present(*pte) ||
1867 (write && !pte_write(*pte))) {
1868 if (pte_present(*pte) || pte_none(*pte)) {
1869 spin_unlock(ptl);
1870 ret = handle_pte_fault(mm,
1871 vma, start, pte, pmd,
1872 write ? FAULT_FLAG_WRITE : 0);
1873 if (IS_ERR_VALUE(ret)) {
1874 ret = -EIO;
1875 goto out_unmap;
1876 }
1877 spin_lock(ptl);
1878 }
1879 }
1880
1881 if (!pte_present(*pte) ||
1882 (write && !pte_write(*pte))) {
1883 ret = -EPERM;
1884 spin_unlock(ptl);
1885 goto out_unmap;
1886 }
1887
1888 if (!is_pfnmap)
1889 get_page(pte_page(*pte));
1890 *pent = mk_lv2ent_spage(__pfn_to_phys(
1891 pte_pfn(*pte)));
1892 if (shareable)
1893 set_lv2ent_shareable(pent);
1894
1895 if (lv2ent_offset(iova) == (NUM_LV2ENTRIES - 1)) {
1896 pgtable_flush(pent_first, pent + 1);
1897 iova += PAGE_SIZE;
1898 sent = section_entry(priv->pgtable, iova);
1899 pent = alloc_lv2entry_fast(priv, sent, iova);
1900 if (IS_ERR(pent)) {
1901 ret = PTR_ERR(pent);
1902 spin_unlock(ptl);
1903 goto out_unmap;
1904 }
1905 pent_first = pent;
1906 } else {
1907 iova += PAGE_SIZE;
1908 pent++;
1909 }
1910 } while (pte++, start += PAGE_SIZE, start < pmd_next);
1911
1912 if (pent_first != pent)
1913 pgtable_flush(pent_first, pent);
1914 spin_unlock(ptl);
1915 } while (pmd++, start = pmd_next, start != pgd_next);
1916
1917 } while (pgd++, start = pgd_next, start != end);
1918
1919 ret = 0;
1920out_unmap:
1921 up_read(&mm->mmap_sem);
1922
1923 if (ret) {
1924 pr_debug("%s: Rolling back partial mapping for %#lx ~ %#lx\n",
1925 __func__, start, end);
1926 __sysmmu_unmap_user_pages(dev, mm, vaddr, iova_start,
1927 start - (vaddr & PAGE_MASK));
1928 }
1929
1930 return ret;
1931}
1932
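/*
 * Tear down an IOVA range previously set up by
 * exynos_sysmmu_map_user_pages(); the actual work is done by
 * __sysmmu_unmap_user_pages().
 */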
1933int exynos_sysmmu_unmap_user_pages(struct device *dev,
1934 struct mm_struct *mm,
1935 unsigned long vaddr,
1936 exynos_iova_t iova,
1937 size_t size)
1938{
1939 if (WARN_ON(size == 0))
1940 return 0;
1941
1942 return __sysmmu_unmap_user_pages(dev, mm, vaddr, iova, size);
1943}
1944
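/*
 * Module initialisation: create the kmem cache for level-2 page
 * tables, allocate the global fault page and the shared zero level-2
 * table, register the IOMMU ops on the platform bus and finally the
 * System MMU platform driver. The "sysmmu" debugfs directory is only
 * used for diagnostics, so failing to create it is not fatal.
 */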
1945static int __init exynos_iommu_init(void)
1946{
1947 struct page *page;
1948 int ret = -ENOMEM;
1949
1950 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1951 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1952 if (!lv2table_kmem_cache) {
1953 pr_err("%s: failed to create kmem cache\n", __func__);
1954 return -ENOMEM;
1955 }
1956
1957 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1958 if (!page) {
1959 pr_err("%s: failed to allocate fault page\n", __func__);
1960 goto err_fault_page;
1961 }
1962 fault_page = page_to_phys(page);
1963
1964 ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1965 if (ret) {
1966 pr_err("%s: Failed to register IOMMU ops\n", __func__);
1967 goto err_set_iommu;
1968 }
1969
1970 zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
1971 if (zero_lv2_table == NULL) {
1972 pr_err("%s: Failed to allocate zero level2 page table\n",
1973 __func__);
1974 ret = -ENOMEM;
1975 goto err_zero_lv2;
1976 }
1977
1978 exynos_sysmmu_debugfs_root = debugfs_create_dir("sysmmu", NULL);
1979 if (!exynos_sysmmu_debugfs_root)
1980 pr_err("%s: Failed to create debugfs entry\n", __func__);
1981
1982 ret = platform_driver_register(&exynos_sysmmu_driver);
1983 if (ret) {
1984 pr_err("%s: Failed to register System MMU driver.\n", __func__);
1985 goto err_driver_register;
1986 }
1987
1988 return 0;
1989err_driver_register:
1990 kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
1991err_zero_lv2:
1992 bus_set_iommu(&platform_bus_type, NULL);
1993err_set_iommu:
1994 __free_page(page);
1995err_fault_page:
1996 kmem_cache_destroy(lv2table_kmem_cache);
1997 return ret;
1998}
1999arch_initcall_sync(exynos_iommu_init);
2000
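/*
 * System sleep hooks used as generic power domain callbacks: every
 * active System MMU attached to @dev is disabled on suspend and
 * re-enabled on resume, bracketing the master device's own
 * suspend/resume.
 */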
2001#ifdef CONFIG_PM_SLEEP
2002static int sysmmu_pm_genpd_suspend(struct device *dev)
2003{
2004 struct sysmmu_list_data *list;
2005 int ret;
2006
2007 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2008
2009 ret = pm_generic_suspend(dev);
2010 if (ret) {
2011 TRACE_LOG("<----- %s(%s) Failed\n", __func__, dev_name(dev));
2012 return ret;
2013 }
2014
2015 for_each_sysmmu_list(dev, list) {
2016 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2017 unsigned long flags;
2018 TRACE_LOG("Suspending %s...\n", dev_name(drvdata->sysmmu));
2019 spin_lock_irqsave(&drvdata->lock, flags);
2020 if (!drvdata->suspended && is_sysmmu_active(drvdata) &&
2021 (!pm_runtime_enabled(dev) || drvdata->runtime_active))
2022 __sysmmu_disable_nocount(drvdata);
2023 drvdata->suspended = true;
2024 spin_unlock_irqrestore(&drvdata->lock, flags);
2025 }
2026
2027 TRACE_LOG("<----- %s(%s)\n", __func__, dev_name(dev));
2028
2029 return 0;
2030}
2031
2032static int sysmmu_pm_genpd_resume(struct device *dev)
2033{
2034 struct sysmmu_list_data *list;
2035 int ret;
2036
2037 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2038
2039 for_each_sysmmu_list(dev, list) {
2040 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2041 unsigned long flags;
2042 spin_lock_irqsave(&drvdata->lock, flags);
2043 if (drvdata->suspended && is_sysmmu_active(drvdata) &&
2044 (!pm_runtime_enabled(dev) || drvdata->runtime_active))
2045 __sysmmu_enable_nocount(drvdata);
2046 drvdata->suspended = false;
2047 spin_unlock_irqrestore(&drvdata->lock, flags);
2048 }
2049
2050 ret = pm_generic_resume(dev);
2051
2052 TRACE_LOG("<----- %s(%s) OK\n", __func__, dev_name(dev));
2053
2054 return ret;
2055}
2056#endif
2057
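/*
 * Runtime PM helpers: sysmmu_save_state() turns the attached System
 * MMUs off and sysmmu_restore_state() turns them back on around the
 * master device's runtime suspend/resume, tracked via ->runtime_active.
 */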
2058#ifdef CONFIG_PM_RUNTIME
2059static void sysmmu_restore_state(struct device *dev)
2060{
2061 struct sysmmu_list_data *list;
2062
2063 for_each_sysmmu_list(dev, list) {
2064 struct sysmmu_drvdata *data = dev_get_drvdata(list->sysmmu);
2065 unsigned long flags;
2066
2067 TRACE_LOG("%s(%s)\n", __func__, dev_name(data->sysmmu));
2068
2069 SYSMMU_EVENT_LOG_POWERON(SYSMMU_DRVDATA_TO_LOG(data));
2070
2071 spin_lock_irqsave(&data->lock, flags);
2072 if (!data->runtime_active && is_sysmmu_active(data))
2073 __sysmmu_enable_nocount(data);
2074 data->runtime_active = true;
2075 spin_unlock_irqrestore(&data->lock, flags);
2076 }
2077}
2078
2079static void sysmmu_save_state(struct device *dev)
2080{
2081 struct sysmmu_list_data *list;
2082
2083 for_each_sysmmu_list(dev, list) {
2084 struct sysmmu_drvdata *data = dev_get_drvdata(list->sysmmu);
2085 unsigned long flags;
2086
2087 TRACE_LOG("%s(%s)\n", __func__, dev_name(data->sysmmu));
2088
2089 SYSMMU_EVENT_LOG_POWEROFF(SYSMMU_DRVDATA_TO_LOG(data));
2090
2091 spin_lock_irqsave(&data->lock, flags);
2092 if (data->runtime_active && is_sysmmu_active(data))
2093 __sysmmu_disable_nocount(data);
2094 data->runtime_active = false;
2095 spin_unlock_irqrestore(&data->lock, flags);
2096 }
2097}
2098
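/*
 * Generic power domain save/restore callbacks: invoke the master
 * device's own runtime_suspend/runtime_resume callback (type, class,
 * bus or driver, in that order of preference) and save or restore the
 * System MMU state around it.
 */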
2099static int sysmmu_pm_genpd_save_state(struct device *dev)
2100{
2101 int (*cb)(struct device *__dev);
2102 int ret = 0;
2103
2104 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2105
2106 if (dev->type && dev->type->pm)
2107 cb = dev->type->pm->runtime_suspend;
2108 else if (dev->class && dev->class->pm)
2109 cb = dev->class->pm->runtime_suspend;
2110 else if (dev->bus && dev->bus->pm)
2111 cb = dev->bus->pm->runtime_suspend;
2112 else
2113 cb = NULL;
2114
2115 if (!cb && dev->driver && dev->driver->pm)
2116 cb = dev->driver->pm->runtime_suspend;
2117
2118 if (cb)
2119 ret = cb(dev);
2120
2121 if (ret == 0)
2122 sysmmu_save_state(dev);
2123
2124 TRACE_LOG("<----- %s(%s) (cb = %pS) %s\n", __func__, dev_name(dev),
2125 cb, ret ? "Failed" : "OK");
2126
2127 return ret;
2128}
2129
2130static int sysmmu_pm_genpd_restore_state(struct device *dev)
2131{
2132 int (*cb)(struct device *__dev);
2133 int ret = 0;
2134
2135 TRACE_LOG("%s(%s) ----->\n", __func__, dev_name(dev));
2136
2137 if (dev->type && dev->type->pm)
2138 cb = dev->type->pm->runtime_resume;
2139 else if (dev->class && dev->class->pm)
2140 cb = dev->class->pm->runtime_resume;
2141 else if (dev->bus && dev->bus->pm)
2142 cb = dev->bus->pm->runtime_resume;
2143 else
2144 cb = NULL;
2145
2146 if (!cb && dev->driver && dev->driver->pm)
2147 cb = dev->driver->pm->runtime_resume;
2148
2149 sysmmu_restore_state(dev);
2150
2151 if (cb)
2152 ret = cb(dev);
2153
2154 if (ret)
2155 sysmmu_save_state(dev);
2156
2157 TRACE_LOG("<----- %s(%s) (cb = %pS) %s\n", __func__, dev_name(dev),
2158 cb, ret ? "Failed" : "OK");
2159
2160 return ret;
2161}
2162#endif
2163
2164#ifdef CONFIG_PM_GENERIC_DOMAINS
2165static struct gpd_dev_ops sysmmu_devpm_ops = {
2166#ifdef CONFIG_PM_RUNTIME
2167 .save_state = &sysmmu_pm_genpd_save_state,
2168 .restore_state = &sysmmu_pm_genpd_restore_state,
2169#endif
2170#ifdef CONFIG_PM_SLEEP
2171 .suspend = &sysmmu_pm_genpd_suspend,
2172 .resume = &sysmmu_pm_genpd_resume,
2173#endif
2174};
2175#endif /* CONFIG_PM_GENERIC_DOMAINS */
2176
2177#ifdef CONFIG_PM_GENERIC_DOMAINS
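/*
 * Bus notifier that wires the power-domain callbacks above into every
 * master device that owns a System MMU: gpd_dev_ops are installed when
 * a driver is about to bind, the System MMUs are forced active once
 * binding completes for masters without runtime PM or a power domain,
 * and the callbacks are removed again on unbind.
 */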
2178static int sysmmu_hook_driver_register(struct notifier_block *nb,
2179 unsigned long val,
2180 void *p)
2181{
2182 struct device *dev = p;
2183
2184 /*
2185 * No System MMU assigned. See exynos_sysmmu_probe().
2186 */
2187 if (dev->archdata.iommu == NULL)
2188 return 0;
2189
2190 switch (val) {
2191 case BUS_NOTIFY_BIND_DRIVER:
2192 {
2193 if (dev->pm_domain) {
2194 int ret = pm_genpd_add_callbacks(
2195 dev, &sysmmu_devpm_ops, NULL);
2196 if (ret && (ret != -ENOSYS)) {
2197 dev_err(dev,
2198 "Failed to register 'gpd_dev_ops' for iommu\n");
2199 return ret;
2200 }
2201
2202 dev_info(dev, "exynos-iommu gpd_dev_ops inserted!\n");
2203 }
2204
2205 break;
2206 }
2207 case BUS_NOTIFY_BOUND_DRIVER:
2208 {
2209 struct sysmmu_list_data *list;
2210
2211 if (pm_runtime_enabled(dev) && dev->pm_domain)
2212 break;
2213
2214 for_each_sysmmu_list(dev, list) {
2215 struct sysmmu_drvdata *data =
2216 dev_get_drvdata(list->sysmmu);
2217 unsigned long flags;
2218 spin_lock_irqsave(&data->lock, flags);
2219 if (is_sysmmu_active(data) && !data->runtime_active)
2220 __sysmmu_enable_nocount(data);
2221 data->runtime_active = true;
2222 pm_runtime_disable(data->sysmmu);
2223 spin_unlock_irqrestore(&data->lock, flags);
2224 }
2225
2226 break;
2227 }
2228 case BUS_NOTIFY_UNBOUND_DRIVER:
2229 {
2230 struct exynos_iommu_owner *owner = dev->archdata.iommu;
2231 WARN_ON(!list_empty(&owner->client));
2232 __pm_genpd_remove_callbacks(dev, false);
2233 dev_info(dev, "exynos-iommu gpd_dev_ops removed!\n");
2234 break;
2235 }
2236 } /* switch (val) */
2237
2238 return 0;
2239}
2240
2241static struct notifier_block sysmmu_notifier = {
2242 .notifier_call = &sysmmu_hook_driver_register,
2243};
2244
2245static int __init exynos_iommu_prepare(void)
2246{
2247 return bus_register_notifier(&platform_bus_type, &sysmmu_notifier);
2248}
2249subsys_initcall_sync(exynos_iommu_prepare);
2250#endif
2251
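/*
 * Page table dump helpers: print every non-empty level-1/level-2 entry
 * of a System MMU page table, four entries per line, skipping entries
 * that only link to the shared zero level-2 table.
 */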
2252static void sysmmu_dump_lv2_page_table(unsigned int lv1idx, sysmmu_pte_t *base)
2253{
2254 unsigned int i;
2255 for (i = 0; i < NUM_LV2ENTRIES; i += 4) {
2256 if (!base[i] && !base[i + 1] && !base[i + 2] && !base[i + 3])
2257 continue;
2258 pr_info(" LV2[%04d][%03d] %08x %08x %08x %08x\n",
2259 lv1idx, i,
2260 base[i], base[i + 1], base[i + 2], base[i + 3]);
2261 }
2262}
2263
2264static void sysmmu_dump_page_table(sysmmu_pte_t *base)
2265{
2266 unsigned int i;
2267 phys_addr_t phys_base = virt_to_phys(base);
2268
2269 pr_info("---- System MMU Page Table @ %pa (ZeroLv2Desc: %#x) ----\n",
2270 &phys_base, ZERO_LV2LINK);
2271
2272 for (i = 0; i < NUM_LV1ENTRIES; i += 4) {
2273 unsigned int j;
2274 if ((base[i] == ZERO_LV2LINK) &&
2275 (base[i + 1] == ZERO_LV2LINK) &&
2276 (base[i + 2] == ZERO_LV2LINK) &&
2277 (base[i + 3] == ZERO_LV2LINK))
2278 continue;
2279 pr_info("LV1[%04d] %08x %08x %08x %08x\n",
2280 i, base[i], base[i + 1], base[i + 2], base[i + 3]);
2281
2282 for (j = 0; j < 4; j++)
2283 if (lv1ent_page(&base[i + j]))
2284 sysmmu_dump_lv2_page_table(i + j,
2285 page_entry(&base[i + j], 0));
2286 }
2287}
2288
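/*
 * Dump the TLB and prefetch buffer (PB) state of every active System
 * MMU attached to @dev. Inactive instances are skipped with a notice.
 */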
2289void exynos_sysmmu_show_status(struct device *dev)
2290{
2291 struct sysmmu_list_data *list;
2292
2293 for_each_sysmmu_list(dev, list) {
2294 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2295
2296 if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
2297 dev_info(drvdata->sysmmu,
2298 "%s: System MMU is not active\n", __func__);
2299 continue;
2300 }
2301
2302 pr_info("DUMPING SYSTEM MMU: %s\n", dev_name(drvdata->sysmmu));
2303
2304 __master_clk_enable(drvdata);
2305 if (sysmmu_block(drvdata->sfrbase))
2306 dump_sysmmu_tlb_pb(drvdata->sfrbase);
2307 else
2308 pr_err("!!Failed to block System MMU!\n");
2309 sysmmu_unblock(drvdata->sfrbase);
2310
2311 __master_clk_disable(drvdata);
2312 }
2313}
2314
2315void exynos_sysmmu_dump_pgtable(struct device *dev)
2316{
2317 struct exynos_iommu_owner *owner = dev->archdata.iommu;
2318 struct sysmmu_list_data *list =
2319 list_first_entry(&owner->mmu_list, struct sysmmu_list_data, node);
2320 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2321
2322 sysmmu_dump_page_table(phys_to_virt(drvdata->pgtable));
2323}
2324
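/*
 * PPC (performance counter) helpers: dump, clear and configure the
 * System MMU event counters (up to MAX_NUM_PPC events). The System MMU
 * is blocked while its counter registers are accessed.
 */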
2325void exynos_sysmmu_show_ppc_event(struct device *dev)
2326{
2327 struct sysmmu_list_data *list;
2328 unsigned long flags;
2329
2330 for_each_sysmmu_list(dev, list) {
2331 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2332
2333 spin_lock_irqsave(&drvdata->lock, flags);
2334 if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
2335 dev_info(drvdata->sysmmu,
2336 "%s: System MMU is not active\n", __func__);
2337 spin_unlock_irqrestore(&drvdata->lock, flags);
2338 continue;
2339 }
2340
2341 __master_clk_enable(drvdata);
2342 if (sysmmu_block(drvdata->sfrbase))
2343 dump_sysmmu_ppc_cnt(drvdata);
2344 else
2345 pr_err("!!Failed to block System MMU!\n");
2346 sysmmu_unblock(drvdata->sfrbase);
2347 __master_clk_disable(drvdata);
2348 spin_unlock_irqrestore(&drvdata->lock, flags);
2349 }
2350}
2351
2352void exynos_sysmmu_clear_ppc_event(struct device *dev)
2353{
2354 struct sysmmu_list_data *list;
2355 unsigned long flags;
2356
2357 for_each_sysmmu_list(dev, list) {
2358 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2359
2360 spin_lock_irqsave(&drvdata->lock, flags);
2361 if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
2362 dev_info(drvdata->sysmmu,
2363 "%s: System MMU is not active\n", __func__);
2364 spin_unlock_irqrestore(&drvdata->lock, flags);
2365 continue;
2366 }
2367
2368 __master_clk_enable(drvdata);
2369 if (sysmmu_block(drvdata->sfrbase)) {
2370 dump_sysmmu_ppc_cnt(drvdata);
2371 __raw_writel(0x2, drvdata->sfrbase + REG_PPC_PMNC);
2372 __raw_writel(0, drvdata->sfrbase + REG_PPC_CNTENS);
2373 __raw_writel(0, drvdata->sfrbase + REG_PPC_INTENS);
2374 drvdata->event_cnt = 0;
2375 } else
2376 pr_err("!!Failed to block System MMU!\n");
2377 sysmmu_unblock(drvdata->sfrbase);
2378 __master_clk_disable(drvdata);
2379
2380 spin_unlock_irqrestore(&drvdata->lock, flags);
2381 }
2382}
2383
2384int exynos_sysmmu_set_ppc_event(struct device *dev, int event)
2385{
2386 struct sysmmu_list_data *list;
2387 unsigned long flags;
2388 int ret = 0;
2389
2390 for_each_sysmmu_list(dev, list) {
2391 struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
2392
2393 spin_lock_irqsave(&drvdata->lock, flags);
2394 if (!is_sysmmu_active(drvdata) || !drvdata->runtime_active) {
2395 dev_info(drvdata->sysmmu,
2396 "%s: System MMU is not active\n", __func__);
2397 spin_unlock_irqrestore(&drvdata->lock, flags);
2398 continue;
2399 }
2400
2401 __master_clk_enable(drvdata);
2402 if (sysmmu_block(drvdata->sfrbase)) {
2403 if (drvdata->event_cnt < MAX_NUM_PPC) {
2404 ret = sysmmu_set_ppc_event(drvdata, event);
2405 if (ret)
2406 pr_err("Unsupported PPC event ID (%d)\n",
2407 event);
2408 else
2409 drvdata->event_cnt++;
2410 }
2411 } else
2412 pr_err("!!Failed to block System MMU!\n");
2413 sysmmu_unblock(drvdata->sfrbase);
2414 __master_clk_disable(drvdata);
2415
2416 spin_unlock_irqrestore(&drvdata->lock, flags);
2417 }
2418
2419 return ret;
2420}