/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function
 * that may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */
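
/*
 * A minimal sketch of the fallback the TODO above describes; the atomic
 * variant and the work item are hypothetical, only psb_mmu_insert_pages()
 * below exists today:
 *
 *	if (psb_mmu_insert_pages_atomic(pd, pages, addr, num, 0, 0, type))
 *		queue_work(wq, &fault_work);	/- sleeping retry path -/
 *
 * The atomic variant would have to return an error instead of sleeping on
 * the rw semaphore or on page-table allocation.
 */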

struct psb_mmu_driver {
	/* protects driver- and pd structures. Always take in read mode
	 * before taking the page table spinlock.
	 */
	struct rw_semaphore sem;

	/* protects page tables, directory tables and pt structures. */
	spinlock_t lock;

	atomic_t needs_tlbflush;

	uint8_t __iomem *register_map;
	struct psb_mmu_pd *default_pd;
	/*uint32_t bif_ctrl;*/
	int has_clflush;
	int clflush_add;
	unsigned long clflush_mask;

	struct drm_psb_private *dev_priv;
};

struct psb_mmu_pd;

struct psb_mmu_pt {
	struct psb_mmu_pd *pd;
	uint32_t index;
	uint32_t count;
	struct page *p;
	uint32_t *v;
};

struct psb_mmu_pd {
	struct psb_mmu_driver *driver;
	int hw_context;
	struct psb_mmu_pt **tables;
	struct page *p;
	struct page *dummy_pt;
	struct page *dummy_page;
	uint32_t pd_mask;
	uint32_t invalid_pde;
	uint32_t invalid_pte;
};

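/*
 * The SGX MMU is a classic two-level, 32-bit layout: the page directory
 * (pd->p) holds 1024 PDEs, each pointing at a page table of 1024 PTEs,
 * each mapping a 4 KiB page. As a worked example, assuming
 * PSB_PDE_SHIFT == 22 and PSB_PTE_SHIFT == 12, the address 0x00401000
 * decomposes via the helpers below into PD index 1 and PT index 1.
 */
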
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	return offset >> PSB_PDE_SHIFT;
}

static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
				   void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}

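/*
 * The mb() pair around psb_clflush() matters: clflush is not ordered with
 * respect to ordinary stores, so only a full fence (mfence, which mb()
 * emits on x86) guarantees that the PTE writes have left the store buffer
 * before the line is flushed, and that the flush has completed before we
 * proceed.
 */
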
static void psb_page_clflush(struct psb_mmu_driver *driver, struct page *page)
{
	uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	int i;
	uint8_t *clf;
	uint8_t *base;

	base = clf = kmap_atomic(page, KM_USER0);
	mb();
	for (i = 0; i < clflush_count; ++i) {
		psb_clflush(clf);
		clf += clflush_add;
	}
	mb();
	/* clf now points one page past the mapping; unmap via the base. */
	kunmap_atomic(base, KM_USER0);
}

static void psb_pages_clflush(struct psb_mmu_driver *driver,
			      struct page *page[], unsigned long num_pages)
{
	int i;

	if (!driver->has_clflush)
		return;

	for (i = 0; i < num_pages; i++)
		psb_page_clflush(driver, *page++);
}

static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
				    int force)
{
	atomic_set(&driver->needs_tlbflush, 0);
}

static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}

void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
{
	if (rc_prot)
		down_write(&driver->sem);
	if (rc_prot)
		up_write(&driver->sem);
}

void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	/*ttm_tt_cache_flush(&pd->p, 1);*/
	psb_pages_clflush(pd->driver, &pd->p, 1);
	down_write(&pd->driver->sem);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
	return (addr < end) ? addr : end;
}

static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}

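/*
 * For example, mapping pfn 0x1234 as cacheable gives
 *
 *	psb_mmu_mask_pte(0x1234, PSB_MMU_CACHED_MEMORY)
 *		== (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED
 *
 * i.e. the physical page address in the high bits with the attribute flags
 * folded into the low, page-offset bits.
 */
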
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde =
		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
				     invalid_type);
		pd->invalid_pte =
		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
				     invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap(pd->dummy_pt);

	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}

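/*
 * A minimal usage sketch, assuming the caller owns a psb_mmu_driver and a
 * free SGX hardware context number (both illustrative here):
 *
 *	struct psb_mmu_pd *pd;
 *
 *	pd = psb_mmu_alloc_pd(driver, 0, PSB_MMU_CACHED_MEMORY);
 *	if (!pd)
 *		return -ENOMEM;
 *	psb_mmu_set_pd_context(pd, 1);	/- bind to hw context 1 -/
 *
 * With trap_pagefaults == 0, accesses to unmapped addresses resolve to the
 * dummy page/pt rather than faulting, which is why invalid_pde and
 * invalid_pte are built from the dummy pages above.
 */
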
void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1)
		psb_mmu_flush_pd_locked(driver, 1);

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}

static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p, KM_USER0);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

	if (pd->driver->has_clflush && pd->hw_context != -1) {
		/* Compute the count here so we never divide by a zero
		 * clflush_add when clflush is unsupported. */
		clflush_count = PAGE_SIZE / clflush_add;
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}

	kunmap_atomic(v, KM_USER0);
	spin_unlock(lock);

	pt->count = 0;
	pt->pd = pd;
	pt->index = 0;

	return pt;
}

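/*
 * psb_mmu_pt_alloc_map_lock() below returns with the page table kmapped
 * and the driver spinlock held; psb_mmu_pt_unmap_unlock() undoes both.
 * Because psb_mmu_alloc_pt() can sleep, the spinlock is dropped around the
 * allocation and retaken afterwards; the pd->tables[index] re-check then
 * catches the race where another thread installed a table for the same
 * slot in the meantime, in which case the fresh one is simply freed again.
 */
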
struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		v = kmap_atomic(pd->p, KM_USER0);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;

		if (pd->hw_context != -1) {
			/* Flush while the PDE is still mapped; once
			 * kunmap_atomic() runs the slot may be reused. */
			psb_mmu_clflush(pd->driver, (void *) &v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		kunmap_atomic((void *) v, KM_USER0);
	}
	pt->v = kmap_atomic(pt->p, KM_USER0);
	return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p, KM_USER0);
	return pt;
}

static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	kunmap_atomic(pt->v, KM_USER0);
	if (pt->count == 0) {
		v = kmap_atomic(pd->p, KM_USER0);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver,
					(void *) &v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		/* Unmap the page-directory mapping taken just above. */
		kunmap_atomic(v, KM_USER0);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
				   unsigned long addr, uint32_t pte)
{
	pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
					  unsigned long addr)
{
	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

#if 0
static uint32_t psb_mmu_check_pte_locked(struct psb_mmu_pd *pd,
					 uint32_t mmu_offset)
{
	uint32_t *v;
	uint32_t pfn;

	v = kmap_atomic(pd->p, KM_USER0);
	if (!v) {
		printk(KERN_INFO "Could not kmap pde page.\n");
		return 0;
	}
	pfn = v[psb_mmu_pd_index(mmu_offset)];
	/* printk(KERN_INFO "pde is 0x%08x\n", pfn); */
	kunmap_atomic(v, KM_USER0);
	if ((pfn & 0x0F) != PSB_PTE_VALID) {
		printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n",
		       mmu_offset, pfn);
	}
	v = ioremap(pfn & 0xFFFFF000, 4096);
	if (!v) {
		printk(KERN_INFO "Could not kmap pte page.\n");
		return 0;
	}
	pfn = v[psb_mmu_pt_index(mmu_offset)];
	/* printk(KERN_INFO "pte is 0x%08x\n", pfn); */
	iounmap(v);
	if ((pfn & 0x0F) != PSB_PTE_VALID) {
		printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n",
		       mmu_offset, pfn);
	}
	return pfn >> PAGE_SHIFT;
}

static void psb_mmu_check_mirrored_gtt(struct psb_mmu_pd *pd,
				       uint32_t mmu_offset,
				       uint32_t gtt_pages)
{
	uint32_t start;
	uint32_t next;

	printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n",
	       mmu_offset, gtt_pages);
	down_read(&pd->driver->sem);
	start = psb_mmu_check_pte_locked(pd, mmu_offset);
	mmu_offset += PAGE_SIZE;
	gtt_pages -= 1;
	while (gtt_pages--) {
		next = psb_mmu_check_pte_locked(pd, mmu_offset);
		if (next != start + 1) {
			printk(KERN_INFO
			       "Ptes out of order: 0x%08x, 0x%08x.\n",
			       start, next);
		}
		start = next;
		mmu_offset += PAGE_SIZE;
	}
	up_read(&pd->driver->sem);
}

#endif

void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
			uint32_t mmu_offset, uint32_t gtt_start,
			uint32_t gtt_pages)
{
	uint32_t *base;
	uint32_t *v;
	uint32_t start = psb_mmu_pd_index(mmu_offset);
	struct psb_mmu_driver *driver = pd->driver;

	down_read(&driver->sem);
	spin_lock(&driver->lock);

	base = kmap_atomic(pd->p, KM_USER0);
	v = base + start;

	while (gtt_pages--) {
		*v++ = gtt_start | pd->pd_mask;
		gtt_start += PAGE_SIZE;
	}

	/* Unmap before flushing: psb_pages_clflush() takes its own atomic
	 * kmap of pd->p, and all PDEs written above live in that single
	 * page-directory page. */
	kunmap_atomic(base, KM_USER0);
	/*ttm_tt_cache_flush(&pd->p, 1);*/
	psb_pages_clflush(pd->driver, &pd->p, 1);
	spin_unlock(&driver->lock);

	if (pd->hw_context != -1)
		atomic_set(&pd->driver->needs_tlbflush, 1);

	up_read(&pd->driver->sem);
	psb_mmu_flush_pd(pd->driver, 0);
}

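/*
 * Note on "mirroring": the loop above advances gtt_start one page per PDE,
 * i.e. the PDEs are pointed at the GTT's own page-table pages, so memory
 * mapped through the GTT becomes visible through the SGX MMU without
 * duplicating any PTEs.
 */
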
struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	/* down_read(&driver->sem); */
	pd = driver->default_pd;
	/* up_read(&driver->sem); */

	return pd;
}

/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *pd;

	pd = psb_mmu_get_default_pd(driver);
	return page_to_pfn(pd->p) << PAGE_SHIFT;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}

struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem *registers,
					   int trap_pagefaults,
					   int invalid_type,
					   struct drm_psb_private *dev_priv)
{
	struct psb_mmu_driver *driver;

	/* Zero the struct so clflush_add and clflush_mask are not left
	 * uninitialized on CPUs without clflush. */
	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return NULL;
	driver->dev_priv = dev_priv;

	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;

	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	down_write(&driver->sem);
	driver->register_map = registers;
	atomic_set(&driver->needs_tlbflush, 1);

	driver->has_clflush = 0;

	if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;

		/*
		 * clflush size is determined at kernel setup for x86_64
		 * but not for i386. We have to do it here.
		 */

		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}

	up_write(&driver->sem);
	return driver;

out_err1:
	kfree(driver);
	return NULL;
}

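/*
 * A minimal init/teardown sketch, assuming the SGX register block has
 * already been ioremapped (the variable names are illustrative only):
 *
 *	struct psb_mmu_driver *mmu;
 *
 *	mmu = psb_mmu_driver_init(sgx_regs, 1, 0, dev_priv);
 *	if (!mmu)
 *		return -ENOMEM;
 *	...
 *	psb_mmu_driver_takedown(mmu);
 *
 * trap_pagefaults == 1 leaves the invalid PDEs/PTEs zero, so stray
 * accesses fault instead of being redirected to the dummy page.
 */
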
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
			       unsigned long address, uint32_t num_pages,
			       uint32_t desired_tile_stride,
			       uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;

	if (!pd->driver->has_clflush) {
		/*ttm_tt_cache_flush(&pd->p, num_pages);*/
		psb_pages_clflush(pd->driver, &pd->p, num_pages);
		return;
	}

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);

			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}

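/*
 * The tiled walk above is shared with psb_mmu_remove_pages() and
 * psb_mmu_insert_pages(): the region is treated as `rows` runs of
 * desired_tile_stride pages, with consecutive rows starting
 * hw_tile_stride pages apart. For example, num_pages = 16 with
 * desired_tile_stride = 4 and hw_tile_stride = 8 touches four rows of
 * four pages, each row beginning 8 pages (32 KiB) after the previous one.
 */
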
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
				 unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver, 0);
}

void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
			  uint32_t num_pages, uint32_t desired_tile_stride,
			  uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;

	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	/* down_read(&pd->driver->sem); */

	/* Make sure we only need to flush this processor's cache */

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	/* up_read(&pd->driver->sem); */

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver, 0);
}

int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;
	int ret = 0;

	down_read(&pd->driver->sem);

	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);

	do {
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);

	} while (addr = next, next != end);

out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver, 1);

	return ret;
}

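/*
 * A usage sketch for a physically contiguous buffer (buf_page and the
 * chosen GPU-virtual address are illustrative, not part of this driver):
 *
 *	ret = psb_mmu_insert_pfn_sequence(pd, page_to_pfn(buf_page),
 *					  0x10000000, 16,
 *					  PSB_MMU_CACHED_MEMORY);
 *
 * maps 16 consecutive pfns starting at buf_page to GPU-virtual addresses
 * 0x10000000..0x1000ffff. psb_mmu_insert_pages() below is the
 * scatter/gather counterpart: it takes an array of struct page pointers
 * and an optional tile stride instead of a contiguous pfn range.
 */
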
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
			 unsigned long address, uint32_t num_pages,
			 uint32_t desired_tile_stride,
			 uint32_t hw_tile_stride, int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;
	int ret = 0;

	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}

	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;

	down_read(&pd->driver->sem);

	for (i = 0; i < rows; ++i) {

		addr = address;
		end = addr + add;

		do {
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt) {
				ret = -ENOMEM;
				goto out;
			}
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);

		} while (addr = next, next != end);

		address += row_add;
	}
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);

	up_read(&pd->driver->sem);

	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver, 1);

	return ret;
}

int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;

	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;

		spin_lock(lock);
		v = kmap_atomic(pd->p, KM_USER0);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v, KM_USER0);
		spin_unlock(lock);

		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}
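
/*
 * A lookup sketch (gpu_addr is illustrative): on success, *pfn holds the
 * page frame backing the GPU-virtual address, which is the dummy-page pfn
 * when the address falls in an unmapped range of a non-trapping PD:
 *
 *	unsigned long pfn;
 *
 *	if (psb_mmu_virtual_to_pfn(pd, gpu_addr, &pfn) == 0)
 *		pr_debug("0x%08x -> pfn 0x%lx\n", gpu_addr, pfn);
 */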