	mempool_t *tag_pool;
	unsigned tag_pool_max_sectors;
+	struct percpu_counter n_allocated_pages;
+
	struct bio_set *bs;
	struct mutex bio_alloc_lock;
#define MAX_TAG_SIZE 480
#define POOL_ENTRY_SIZE 512
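+/*
+ * Track how many dm-crypt targets are active and how many pages each one
+ * may allocate for bio buffers. dm_crypt_clients_lock serializes updates
+ * from the constructor/destructor paths; dm_crypt_pages_per_client is
+ * read without the lock in the page-allocation hot path.
+ */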
+static DEFINE_SPINLOCK(dm_crypt_clients_lock);
+static unsigned dm_crypt_clients_n = 0;
+static volatile unsigned long dm_crypt_pages_per_client;
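+/*
+ * Per-client budget: a DM_CRYPT_MEMORY_PERCENT share of low memory,
+ * but never less than enough pages for 16 full-sized bios.
+ */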
+#define DM_CRYPT_MEMORY_PERCENT 2
+#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)
+
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
	return r;
}
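+/*
+ * Recompute the per-client page budget: DM_CRYPT_MEMORY_PERCENT of low
+ * memory split evenly among the active clients, clamped to at least
+ * DM_CRYPT_MIN_PAGES_PER_CLIENT. Callers hold dm_crypt_clients_lock.
+ */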
+static void crypt_calculate_pages_per_client(void)
+{
+	unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
+
+	if (!dm_crypt_clients_n)
+		return;
+
+	pages /= dm_crypt_clients_n;
+	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
+		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
+	dm_crypt_pages_per_client = pages;
+}
+
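+/*
+ * mempool allocation callback. Once a client reaches its page budget,
+ * opportunistic (__GFP_NORETRY) allocations are refused, which makes
+ * mempool_alloc() fall back to the pool's preallocated reserve instead
+ * of growing without bound. Requests without __GFP_NORETRY may exceed
+ * the budget so that forward progress never depends on the accounting.
+ */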
+static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
+{
+	struct crypt_config *cc = pool_data;
+	struct page *page;
+
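+	/*
+	 * The limit is soft: refuse only best-effort attempts; a caller
+	 * that cleared __GFP_NORETRY still gets a page even over budget.
+	 */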
+	if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
+	    likely(gfp_mask & __GFP_NORETRY))
+		return NULL;
+
+	page = alloc_page(gfp_mask);
+	if (likely(page != NULL))
+		percpu_counter_add(&cc->n_allocated_pages, 1);
+
+	return page;
+}
+
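+/*
+ * mempool free callback: release the page and take it off this client's
+ * allocation count.
+ */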
+static void crypt_page_free(void *page, void *pool_data)
+{
+	struct crypt_config *cc = pool_data;
+
+	__free_page(page);
+	percpu_counter_sub(&cc->n_allocated_pages, 1);
+}
+
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->tag_pool);
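+	/*
+	 * All pages taken through crypt_page_alloc() should have been
+	 * returned by now; a nonzero sum indicates a page leak.
+	 */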
+	if (cc->page_pool)
+		WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
+	percpu_counter_destroy(&cc->n_allocated_pages);
+
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	/* Must zero key material before freeing */
	kzfree(cc);
+
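+	/*
+	 * Unregister this client and redistribute its share of the page
+	 * budget to the remaining ones.
+	 */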
+	spin_lock(&dm_crypt_clients_lock);
+	WARN_ON(!dm_crypt_clients_n);
+	dm_crypt_clients_n--;
+	crypt_calculate_pages_per_client();
+	spin_unlock(&dm_crypt_clients_lock);
}
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	ti->private = cc;
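+	/*
+	 * Register as a client (shrinking everyone's budget) and set up
+	 * the counter that charges page allocations against this target.
+	 */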
+	spin_lock(&dm_crypt_clients_lock);
+	dm_crypt_clients_n++;
+	crypt_calculate_pages_per_client();
+	spin_unlock(&dm_crypt_clients_lock);
+
+	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
+	if (ret < 0)
+		goto bad;
+
	/* Optional parameters need to be read before cipher constructor */
	if (argc > 5) {
		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
		      ARCH_KMALLOC_MINALIGN);
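+	/*
+	 * Switch from a plain page pool to accounting-aware callbacks;
+	 * cc is passed as pool_data so they can find this client's counter.
+	 */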
-	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
+	cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;