/* add to list to be waited for by daemon */
struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
item->page = page;
- page_cache_get(page);
+ get_page(page);
spin_lock(&bitmap->write_lock);
list_add(&item->list, &bitmap->complete_pages);
spin_unlock(&bitmap->write_lock);
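/* page_cache_get() is a plain alias for get_page() (pagemap.h), so this
 * is purely a rename: these bitmap pages are not page-cache pages, and
 * the bare name says so.  The reference taken here keeps the page alive
 * until the daemon dequeues it and drops the reference after writeback. */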
struct inode *inode = file->f_mapping->host;
struct page *page = NULL;
loff_t isize = i_size_read(inode);
- unsigned long end_index = isize >> PAGE_CACHE_SHIFT;
+ unsigned long end_index = isize >> PAGE_SHIFT;
- PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE,
- (unsigned long long)index << PAGE_CACHE_SHIFT);
+ PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT);
page = read_cache_page(inode->i_mapping, index,
(filler_t *)inode->i_mapping->a_ops->readpage, file);
	if (IS_ERR(page))
		goto out;
wait_on_page_locked(page);
if (!PageUptodate(page) || PageError(page)) {
- page_cache_release(page);
+ put_page(page);
page = ERR_PTR(-EIO);
goto out;
}
if (index > end_index) /* we have read beyond EOF */
*bytes_read = 0;
else if (index == end_index) /* possible short read */
- *bytes_read = isize & ~PAGE_CACHE_MASK;
+ *bytes_read = isize & ~PAGE_MASK;
else
- *bytes_read = PAGE_CACHE_SIZE; /* got a full page */
+ *bytes_read = PAGE_SIZE; /* got a full page */
out:
if (IS_ERR(page))
printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
- (int)PAGE_CACHE_SIZE,
- (unsigned long long)index << PAGE_CACHE_SHIFT,
+ (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT,
PTR_ERR(page));
return page;
}
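/* PAGE_CACHE_SIZE/SHIFT/MASK are defined to PAGE_SIZE/SHIFT/MASK, so the
 * EOF arithmetic is unchanged by the rename.  A worked example, assuming
 * 4096-byte pages: isize = 10000 gives end_index = 10000 >> 12 = 2; a
 * read of index 2 is the short tail, bytes_read = 10000 & ~PAGE_MASK =
 * 1808; any index beyond 2 has read past EOF and returns 0 bytes. */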
spin_unlock_irqrestore(&bitmap->lock, flags);
return;
}
- page_cache_get(bitmap->sb_page);
+ get_page(bitmap->sb_page);
spin_unlock_irqrestore(&bitmap->lock, flags);
sb = (bitmap_super_t *)kmap(bitmap->sb_page);
switch (op) {
default: BUG();
}
kunmap(bitmap->sb_page);
- page_cache_release(bitmap->sb_page);
+ put_page(bitmap->sb_page);
}
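/* Pinning sb_page with get_page() while bitmap->lock is held guarantees
 * the page cannot go away between dropping the lock and the
 * kmap()/kunmap() pair; the trailing put_page() releases that pin. */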
while (pages--)
if (map[pages]->index != 0) /* 0 is sb_page, release it below */
- page_cache_release(map[pages]);
+ put_page(map[pages]);
kfree(map);
kfree(attr);
if (sb_page)
- page_cache_release(sb_page);
+ put_page(sb_page);
}
static void bitmap_stop_daemon(struct bitmap *bitmap);
while ((item = dequeue_page(bitmap))) {
/* don't bother to wait */
- page_cache_release(item->page);
+ put_page(item->page);
mempool_free(item, bitmap->write_pool);
}
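/* Each dequeued item drops the reference taken when the page was queued
 * and returns the wrapper to the mempool; the page itself is freed only
 * if that was its last reference. */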
/* make sure the page stays cached until it gets written out */
if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
- page_cache_get(page);
+ get_page(page);
/* set the bit */
kaddr = kmap_atomic(page, KM_USER0);
if (ret) {
kunmap(page);
/* release, page not in filemap yet */
- page_cache_release(page);
+ put_page(page);
goto out;
}
}
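/* A page about to be dirtied gains one reference so it stays resident
 * until the daemon writes it out; on the error path the page was never
 * added to the filemap, so this put_page() drops the only reference and
 * frees it. */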
/* skip this page unless it's marked as needing cleaning */
if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
if (attr & BITMAP_PAGE_NEEDWRITE) {
- page_cache_get(page);
+ get_page(page);
clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
}
spin_unlock_irqrestore(&bitmap->lock, flags);
default:
bitmap_file_kick(bitmap);
}
- page_cache_release(page);
+ put_page(page);
}
continue;
}
/* grab the new page, sync and release the old */
- page_cache_get(page);
+ get_page(page);
if (lastpage != NULL) {
if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
kunmap(lastpage);
- page_cache_release(lastpage);
+ put_page(lastpage);
if (err)
bitmap_file_kick(bitmap);
} else
spin_unlock_irqrestore(&bitmap->lock, flags);
}
- page_cache_release(lastpage);
+ put_page(lastpage);
}
return err;
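/* lastpage stays mapped and referenced across iterations so that runs of
 * bits falling in the same page share one kmap and one write-out; the
 * put_page() calls above drop that carried reference when the loop moves
 * to a new page and, finally, for the last page visited. */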
PRINTK("finished page writeback: %p\n", page);
err = PageError(page);
- page_cache_release(page);
+ put_page(page);
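	/* PageError() is sampled before put_page(): dropping what may be
	 * the last reference can free the page, after which touching its
	 * flags would be a use-after-free. */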
if (err) {
printk(KERN_WARNING "%s: bitmap file writeback "
"failed (page %lu): %d\n",
out_free_pages:
for (i=0; i < RESYNC_PAGES ; i++)
for (j=0 ; j < pi->raid_disks; j++)
- __free_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
+ put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
if (j == 0 ||
r1bio->bios[j]->bi_io_vec[i].bv_page !=
r1bio->bios[0]->bi_io_vec[i].bv_page)
- __free_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
+ put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
}
for (i=0 ; i < pi->raid_disks; i++)
bio_put(r1bio->bios[i]);
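/* __free_page() returns a page to the allocator unconditionally, while
 * put_page() drops one reference and frees only when the count reaches
 * zero.  For a sole owner the two are equivalent, but put_page() is the
 * correct idiom here because resync pages can be shared between bios, as
 * the bv_page equality test above shows. */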
/* free extra copy of the data pages */
int i = bio->bi_vcnt;
while (i--)
- __free_page(bio->bi_io_vec[i].bv_page);
+ put_page(bio->bi_io_vec[i].bv_page);
}
/* clear the bitmap if all writes complete successfully */
bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
do_sync_io:
if (pages)
for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
- __free_page(pages[i]);
+ put_page(pages[i]);
kfree(pages);
PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
return NULL;
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
- __free_page(conf->tmppage);
+ put_page(conf->tmppage);
kfree(conf->poolinfo);
kfree(conf);
mddev->private = NULL;
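/* conf->tmppage is a private scratch page holding a single reference, so
 * put_page() frees it immediately, matching the old __free_page(). */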
out_free_pages:
for ( ; i > 0 ; i--)
- __free_page(bio->bi_io_vec[i-1].bv_page);
+ put_page(bio->bi_io_vec[i-1].bv_page);
while (j--)
for (i = 0; i < RESYNC_PAGES ; i++)
- __free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
+ put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
	while (++j < nalloc)
		bio_put(r10_bio->devs[j].bio);
struct bio *bio = r10bio->devs[j].bio;
if (bio) {
for (i = 0; i < RESYNC_PAGES; i++) {
- __free_page(bio->bi_io_vec[i].bv_page);
+ put_page(bio->bi_io_vec[i].bv_page);
bio->bi_io_vec[i].bv_page = NULL;
}
bio_put(bio);
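/* Clearing bv_page after each put_page() leaves no stale pointer in the
 * bio_vec, so a later pass over a partially torn-down r10bio cannot drop
 * the same page twice. */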
/* calculate the maximum read-ahead size: we want to read ahead by at
 * least twice a whole stripe....
 * maybe...
 */
{
- int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
+ int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
stripe /= conf->near_copies;
	if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
		mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
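/* Read-ahead is sized to cover two full stripes of data.  A worked
 * example, assuming 4 disks, near_copies = 2, a 64KiB chunk_size and
 * 4KiB pages: stripe = 4 * 65536 / 4096 / 2 = 32 pages, so ra_pages is
 * raised to at least 64 pages (256KiB). */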