if (!em || IS_ERR(em))
return 0;
- // XXX(hch): block 0 is valid in some cases, e.g. XFS RT device
if (em->block_start == EXTENT_MAP_INLINE ||
em->block_start == EXTENT_MAP_HOLE)
return 0;
WARN_ON(!PageUptodate(page));
cur = min(len, (PAGE_CACHE_SIZE - offset));
- // kaddr = kmap_atomic(page, KM_USER0);
- kaddr = page_address(page);
+ kaddr = kmap_atomic(page, KM_USER0);
memcpy(dst, kaddr + offset, cur);
- // kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr, KM_USER0);
dst += cur;
len -= cur;
*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
}
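/*
 * Editor's sketch, not part of the patch: every hunk above restores the same
 * atomic-highmem pattern of the old slot-based kmap_atomic() API. The helper
 * name below is made up for illustration; the mapping is per-CPU and must
 * not be held across anything that can sleep.
 */
#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic(), KM_USER0 */
#include <linux/string.h>	/* memcpy() */

static void copy_bytes_out_of_page(struct page *page, unsigned long offset,
				   char *dst, unsigned long cur)
{
	char *kaddr;

	kaddr = kmap_atomic(page, KM_USER0);	/* map the (possibly highmem) page */
	memcpy(dst, kaddr + offset, cur);	/* copy while the mapping is live */
	kunmap_atomic(kaddr, KM_USER0);		/* drop the mapping again */
}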
- // kaddr = kmap_atomic(eb->pages[i], km);
- kaddr = page_address(extent_buffer_page(eb, i));
+ kaddr = kmap_atomic(extent_buffer_page(eb, i), km);
*token = kaddr;
*map = kaddr + offset;
*map_len = PAGE_CACHE_SIZE - offset;
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
- // kunmap_atomic(token, km);
+ kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
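/*
 * Editor's sketch, not part of the patch: the map_/unmap_extent_buffer()
 * hunk above keeps two pointers because the caller wants kaddr + offset,
 * while the raw kmap_atomic() return value is stashed in *token so it can be
 * handed back to kunmap_atomic() later. The names and signatures here are
 * illustrative assumptions, not the real btrfs interface.
 */
static void map_page_offset(struct page *page, unsigned long offset, int km,
			    char **token, char **map)
{
	char *kaddr = kmap_atomic(page, km);

	*token = kaddr;			/* handed back to kunmap_atomic() later */
	*map = kaddr + offset;		/* what the caller actually dereferences */
}

static void unmap_page_offset(char *token, int km)
{
	kunmap_atomic(token, km);	/* release the same KM slot */
}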
cur = min(len, (PAGE_CACHE_SIZE - offset));
- // kaddr = kmap_atomic(page, KM_USER0);
- kaddr = page_address(page);
+ kaddr = kmap_atomic(page, KM_USER0);
ret = memcmp(ptr, kaddr + offset, cur);
- // kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr, KM_USER0);
if (ret)
break;
WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
- // kaddr = kmap_atomic(page, KM_USER0);
- kaddr = page_address(page);
+ kaddr = kmap_atomic(page, KM_USER0);
memcpy(kaddr + offset, src, cur);
- // kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr, KM_USER0);
src += cur;
len -= cur;
WARN_ON(!PageUptodate(page));
cur = min(len, PAGE_CACHE_SIZE - offset);
- // kaddr = kmap_atomic(page, KM_USER0);
- kaddr = page_address(page);
+ kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr + offset, c, cur);
- // kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr, KM_USER0);
len -= cur;
offset = 0;
cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
- // kaddr = kmap_atomic(page, KM_USER1);
- kaddr = page_address(page);
+ kaddr = kmap_atomic(page, KM_USER1);
read_extent_buffer(src, kaddr + offset, src_offset, cur);
- // kunmap_atomic(kaddr, KM_USER1);
+ kunmap_atomic(kaddr, KM_USER1);
src_offset += cur;
len -= cur;
unsigned long dst_off, unsigned long src_off,
unsigned long len)
{
- // char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
- char *dst_kaddr = page_address(dst_page);
+ char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
if (dst_page == src_page) {
memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
} else {
- // char *src_kaddr = kmap_atomic(src_page, KM_USER1);
- char *src_kaddr = page_address(src_page);
+ char *src_kaddr = kmap_atomic(src_page, KM_USER1);
char *p = dst_kaddr + dst_off + len;
char *s = src_kaddr + src_off + len;
while (len--)
*--p = *--s;
- // kunmap_atomic(src_kaddr, KM_USER1);
+ kunmap_atomic(src_kaddr, KM_USER1);
}
- // kunmap_atomic(dst_kaddr, KM_USER0);
+ kunmap_atomic(dst_kaddr, KM_USER0);
}
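/*
 * Editor's note on the move_pages() hunk above: the two pages are mapped in
 * different slots (KM_USER0 for the destination, KM_USER1 for the source) so
 * both mappings can be live on the same CPU at once. The same-page case is
 * handed to memmove(), which copes with overlap; the cross-page loop copies
 * the chunk from its last byte down to its first, matching the tail-first
 * chunking its caller uses for overlapping ranges (visible at the end of
 * this section).
 */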
static void copy_pages(struct page *dst_page, struct page *src_page,
unsigned long dst_off, unsigned long src_off,
unsigned long len)
{
- //kmap_atomic(dst_page, KM_USER0);
- char *dst_kaddr = page_address(dst_page);
+ char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
char *src_kaddr;
if (dst_page != src_page)
- src_kaddr = page_address(src_page); // kmap_atomic(src_page, KM_USER1);
+ src_kaddr = kmap_atomic(src_page, KM_USER1);
else
src_kaddr = dst_kaddr;
memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
- /*
kunmap_atomic(dst_kaddr, KM_USER0);
if (dst_page != src_page)
kunmap_atomic(src_kaddr, KM_USER1);
- */
}
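/*
 * Editor's note on the copy_pages() hunk above: with the fixed-slot API each
 * kunmap_atomic() call only tears down its own KM slot, so releasing
 * KM_USER0 before KM_USER1 here, while move_pages() releases them in the
 * opposite order, is equally valid; no nesting discipline applies between
 * distinct slots.
 */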
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
cur = min(len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
-// printk("move pages orig dst %lu src %lu len %lu, this %lu %lu %lu\n", dst_offset, src_offset, len, dst_off_in_page - cur + 1, src_off_in_page - cur + 1, cur);
+
move_pages(extent_buffer_page(dst, dst_i),
extent_buffer_page(dst, src_i),
dst_off_in_page - cur + 1,