gma500: remove the second argument of k[un]map_atomic()
author Cong Wang <amwang@redhat.com>
Fri, 25 Nov 2011 15:46:51 +0000 (23:46 +0800)
committer Cong Wang <xiyou.wangcong@gmail.com>
Tue, 20 Mar 2012 13:48:29 +0000 (21:48 +0800)
Signed-off-by: Cong Wang <amwang@redhat.com>
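
kmap_atomic() and kunmap_atomic() no longer take a KM type slot argument (KM_USER0 and friends); the atomic kmap slot is now managed implicitly, so callers just pass the page or the mapped address. A minimal sketch of the conversion pattern applied throughout this patch, using a hypothetical helper rather than anything from this driver:

	/* Illustrative only: hypothetical helper showing the old vs. new calls. */
	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	static void zero_page_atomic(struct page *page)
	{
		void *addr;

		/* Before: addr = kmap_atomic(page, KM_USER0); */
		addr = kmap_atomic(page);
		memset(addr, 0, PAGE_SIZE);
		/* Before: kunmap_atomic(addr, KM_USER0); */
		kunmap_atomic(addr);
	}

The unmap call still takes the address returned by kmap_atomic(), only the slot argument is dropped, so each hunk below is a mechanical one-argument removal.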
drivers/gpu/drm/gma500/mmu.c

index c904d73b1de3f99e9d391ccc0fd6a81d4635f104..e80ee82f6caf0544716212f4743f83de37fa9ab2 100644
@@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
        int i;
        uint8_t *clf;
 
-       clf = kmap_atomic(page, KM_USER0);
+       clf = kmap_atomic(page);
        mb();
        for (i = 0; i < clflush_count; ++i) {
                psb_clflush(clf);
                clf += clflush_add;
        }
        mb();
-       kunmap_atomic(clf, KM_USER0);
+       kunmap_atomic(clf);
 }
 
 static void psb_pages_clflush(struct psb_mmu_driver *driver,
@@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 
        spin_lock(lock);
 
-       v = kmap_atomic(pt->p, KM_USER0);
+       v = kmap_atomic(pt->p);
        clf = (uint8_t *) v;
        ptes = (uint32_t *) v;
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
                mb();
        }
 
-       kunmap_atomic(v, KM_USER0);
+       kunmap_atomic(v);
        spin_unlock(lock);
 
        pt->count = 0;
@@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                        continue;
                }
 
-               v = kmap_atomic(pd->p, KM_USER0);
+               v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;
-               kunmap_atomic((void *) v, KM_USER0);
+               kunmap_atomic((void *) v);
 
                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *) &v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
        }
-       pt->v = kmap_atomic(pt->p, KM_USER0);
+       pt->v = kmap_atomic(pt->p);
        return pt;
 }
 
@@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
                spin_unlock(lock);
                return NULL;
        }
-       pt->v = kmap_atomic(pt->p, KM_USER0);
+       pt->v = kmap_atomic(pt->p);
        return pt;
 }
 
@@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;
 
-       kunmap_atomic(pt->v, KM_USER0);
+       kunmap_atomic(pt->v);
        if (pt->count == 0) {
-               v = kmap_atomic(pd->p, KM_USER0);
+               v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;
 
@@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
                                        (void *) &v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
-               kunmap_atomic(pt->v, KM_USER0);
+               kunmap_atomic(pt->v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);
                return;
@@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
        down_read(&driver->sem);
        spin_lock(&driver->lock);
 
-       v = kmap_atomic(pd->p, KM_USER0);
+       v = kmap_atomic(pd->p);
        v += start;
 
        while (gtt_pages--) {
@@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 
        /*ttm_tt_cache_flush(&pd->p, num_pages);*/
        psb_pages_clflush(pd->driver, &pd->p, num_pages);
-       kunmap_atomic(v, KM_USER0);
+       kunmap_atomic(v);
        spin_unlock(&driver->lock);
 
        if (pd->hw_context != -1)
@@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                uint32_t *v;
 
                spin_lock(lock);
-               v = kmap_atomic(pd->p, KM_USER0);
+               v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
-               kunmap_atomic(v, KM_USER0);
+               kunmap_atomic(v);
                spin_unlock(lock);
 
                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||