p->flags = SWP_USED;
spin_unlock(&swap_lock);
spin_lock_init(&p->lock);
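+ /* protect swap count continuation page list */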
+ spin_lock_init(&p->cont_lock);
return p;
}
head = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
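+ /*
+ * cont_lock, not the cluster lock, guards the continuation page list:
+ * one continuation page holds swap_map counts from several clusters.
+ */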
+ spin_lock(&si->cont_lock);
/*
* Page allocation does not initialize the page's lru field,
* but it does always reset its private field.
*/
/*
* If the previous map said no continuation, but we've found
* a continuation page, free our allocation and use this one.
*/
if (!(count & COUNT_CONTINUED))
- goto out;
+ goto out_unlock_cont;
map = kmap_atomic(list_page) + offset;
count = *map;
kunmap_atomic(map);
/*
* If this continuation count now has some space in it,
* free our allocation and use this one.
*/
if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
- goto out;
+ goto out_unlock_cont;
}
list_add_tail(&page->lru, &head->lru);
page = NULL; /* now it's attached, don't free it */
+out_unlock_cont:
+ spin_unlock(&si->cont_lock);
out:
unlock_cluster(ci);
spin_unlock(&si->lock);
struct page *head;
struct page *page;
unsigned char *map;
+ bool ret;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head) != SWP_CONTINUED) {
return false; /* need to add count continuation */
}
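+ /* serialize with add_swap_count_continuation() on the same continuation list */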
+ spin_lock(&si->cont_lock);
offset &= ~PAGE_MASK;
page = list_entry(head->lru.next, struct page, lru);
map = kmap_atomic(page) + offset;
if (*map == SWAP_CONT_MAX) {
kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
- if (page == head)
- return false; /* add count continuation */
+ if (page == head) {
+ ret = false; /* add count continuation */
+ goto out;
+ }
map = kmap_atomic(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
- return true; /* incremented */
+ ret = true; /* incremented */
} else { /* decrementing */
/*
* Think of how you subtract 1 from 1000
*/
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
- return count == COUNT_CONTINUED;
+ ret = count == COUNT_CONTINUED;
}
+out:
+ spin_unlock(&si->cont_lock);
+ return ret;
}
/*