diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b4f0e28dfa41d6bbff0314783360d455dd6c45ef..db4e10d4c7f545d9ad1e4a8914868046b47785f9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
        int offset;
 
        BUG_ON(!domain->pgd);
-       BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+
+       if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+               /* Address beyond IOMMU's addressing capabilities. */
+               return NULL;
+
        parent = domain->pgd;
 
        while (level > 0) {
@@ -890,56 +894,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
        return order;
 }
 
+static void dma_pte_free_level(struct dmar_domain *domain, int level,
+                              struct dma_pte *pte, unsigned long pfn,
+                              unsigned long start_pfn, unsigned long last_pfn)
+{
+       pfn = max(start_pfn, pfn);
+       pte = &pte[pfn_level_offset(pfn, level)];
+
+       do {
+               unsigned long level_pfn;
+               struct dma_pte *level_pte;
+
+               if (!dma_pte_present(pte) || dma_pte_superpage(pte))
+                       goto next;
+
+               level_pfn = pfn & level_mask(level - 1);
+               level_pte = phys_to_virt(dma_pte_addr(pte));
+
+               if (level > 2)
+                       dma_pte_free_level(domain, level - 1, level_pte,
+                                          level_pfn, start_pfn, last_pfn);
+
+               /* If range covers entire pagetable, free it */
+               if (!(start_pfn > level_pfn ||
+                     last_pfn < level_pfn + level_size(level) - 1)) {
+                       dma_clear_pte(pte);
+                       domain_flush_cache(domain, pte, sizeof(*pte));
+                       free_pgtable_page(level_pte);
+               }
+next:
+               pfn += level_size(level);
+       } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
+}
+
 /* free page table pages. last level pte should already be cleared */
 static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
                                   unsigned long last_pfn)
 {
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       struct dma_pte *first_pte, *pte;
-       int total = agaw_to_level(domain->agaw);
-       int level;
-       unsigned long tmp;
-       int large_page = 2;
 
        BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
        BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
        BUG_ON(start_pfn > last_pfn);
 
        /* We don't need lock here; nobody else touches the iova range */
-       level = 2;
-       while (level <= total) {
-               tmp = align_to_level(start_pfn, level);
-
-               /* If we can't even clear one PTE at this level, we're done */
-               if (tmp + level_size(level) - 1 > last_pfn)
-                       return;
-
-               do {
-                       large_page = level;
-                       first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
-                       if (large_page > level)
-                               level = large_page + 1;
-                       if (!pte) {
-                               tmp = align_to_level(tmp + 1, level + 1);
-                               continue;
-                       }
-                       do {
-                               if (dma_pte_present(pte)) {
-                                       free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
-                                       dma_clear_pte(pte);
-                               }
-                               pte++;
-                               tmp += level_size(level);
-                       } while (!first_pte_in_page(pte) &&
-                                tmp + level_size(level) - 1 <= last_pfn);
+       dma_pte_free_level(domain, agaw_to_level(domain->agaw),
+                          domain->pgd, 0, start_pfn, last_pfn);
 
-                       domain_flush_cache(domain, first_pte,
-                                          (void *)pte - (void *)first_pte);
-                       
-               } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
-               level++;
-       }
        /* free pgd */
        if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
                free_pgtable_page(domain->pgd);
@@ -1794,7 +1796,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       unsigned long sg_res;
+       unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;
 
@@ -1805,10 +1807,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 
-       if (sg)
-               sg_res = 0;
-       else {
-               sg_res = nr_pages + 1;
+       if (!sg) {
+               sg_res = nr_pages;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }
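
For illustration only, below is a standalone userspace sketch of the recursive freeing pattern that dma_pte_free_level() introduces above: walk one level of a multi-level table, recurse into present children first, and free a child table only when the requested pfn range covers everything it maps. This is not the kernel implementation; the 4-entry tables, the 3-level example, and all names here (struct table, free_level, alloc_table, LEVEL_BITS, NR_ENTRIES) are assumptions made for the example, not kernel APIs.

/*
 * Standalone sketch of the recursive pagetable-freeing pattern used by
 * dma_pte_free_level() above.  Not kernel code; layout and names are
 * illustrative (4 entries per table instead of VT-d's 512).
 */
#include <stdio.h>
#include <stdlib.h>

#define LEVEL_BITS 2                    /* 4 entries per table */
#define NR_ENTRIES (1UL << LEVEL_BITS)

struct table {
	struct table *slot[NR_ENTRIES]; /* NULL means "not present" */
};

/* Number of pfns covered by one slot of a table at the given level. */
static unsigned long level_size(int level)
{
	return 1UL << (LEVEL_BITS * (level - 1));
}

/* Mask that aligns a pfn down to the start of its slot at the given level. */
static unsigned long level_mask(int level)
{
	return ~(level_size(level) - 1);
}

/*
 * Walk one level of the table and free every child table that lies
 * entirely inside [start, last]; recurse first so that grandchildren
 * are released before their parent table is.
 */
static void free_level(struct table *tbl, int level, unsigned long pfn,
		       unsigned long start, unsigned long last)
{
	unsigned long idx;

	pfn = pfn > start ? pfn : start;

	for (idx = (pfn / level_size(level)) & (NR_ENTRIES - 1);
	     idx < NR_ENTRIES && pfn <= last;
	     idx++, pfn = (pfn & level_mask(level)) + level_size(level)) {
		unsigned long slot_first = pfn & level_mask(level);
		struct table *child = tbl->slot[idx];

		if (!child)
			continue;

		if (level > 2)          /* level-2 children are leaf tables */
			free_level(child, level - 1, slot_first, start, last);

		/* Free the child table only if the range covers all of it. */
		if (start <= slot_first &&
		    last >= slot_first + level_size(level) - 1) {
			tbl->slot[idx] = NULL;
			free(child);
		}
	}
}

static struct table *alloc_table(void)
{
	return calloc(1, sizeof(struct table));
}

int main(void)
{
	struct table *root = alloc_table();     /* 3 levels: pfns 0..63 */
	unsigned long pfn;

	/* Populate level-2 and level-1 tables for the whole range. */
	for (pfn = 0; pfn < 64; pfn += level_size(2)) {
		unsigned long i3 = (pfn / level_size(3)) & (NR_ENTRIES - 1);
		unsigned long i2 = (pfn / level_size(2)) & (NR_ENTRIES - 1);

		if (!root->slot[i3])
			root->slot[i3] = alloc_table();
		root->slot[i3]->slot[i2] = alloc_table();
	}

	/* Free everything under pfns 0..31; pfns 32..63 must survive. */
	free_level(root, 3, 0, 0, 31);

	for (pfn = 0; pfn < 64; pfn += level_size(3)) {
		unsigned long i3 = (pfn / level_size(3)) & (NR_ENTRIES - 1);

		printf("level-3 slot %lu: %s\n", i3,
		       root->slot[i3] ? "kept" : "freed");
	}
	return 0;       /* remaining tables intentionally leaked for brevity */
}

The expected output marks slots 0 and 1 as freed and slots 2 and 3 as kept, mirroring how the patch frees only those intermediate pagetables that fall completely inside [start_pfn, last_pfn] while leaving partially covered tables in place.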