*/
static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
-static unsigned long zv_curr_dist_counts[NCHUNKS];
-static unsigned long zv_cumul_dist_counts[NCHUNKS];
+static atomic_t zv_curr_dist_counts[NCHUNKS];
+static atomic_t zv_cumul_dist_counts[NCHUNKS];
static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
struct tmem_oid *oid, uint32_t index,
&page, &offset, ZCACHE_GFP_MASK);
if (unlikely(ret))
goto out;
- zv_curr_dist_counts[chunks]++;
- zv_cumul_dist_counts[chunks]++;
+ atomic_inc(&zv_curr_dist_counts[chunks]);
+ atomic_inc(&zv_cumul_dist_counts[chunks]);
zv = kmap_atomic(page, KM_USER0) + offset;
zv->index = index;
zv->oid = *oid;
ASSERT_SENTINEL(zv, ZVH);
BUG_ON(chunks >= NCHUNKS);
- zv_curr_dist_counts[chunks]--;
+ atomic_dec(&zv_curr_dist_counts[chunks]);
size -= sizeof(*zv);
BUG_ON(size == 0);
INVERT_SENTINEL(zv, ZVH);
char *p = buf;
for (i = 0; i < NCHUNKS; i++) {
- n = zv_curr_dist_counts[i];
+ n = atomic_read(&zv_curr_dist_counts[i]);
p += sprintf(p, "%lu ", n);
chunks += n;
sum_total_chunks += i * n;
char *p = buf;
for (i = 0; i < NCHUNKS; i++) {
- n = zv_cumul_dist_counts[i];
+ n = atomic_read(&zv_cumul_dist_counts[i]);
p += sprintf(p, "%lu ", n);
chunks += n;
sum_total_chunks += i * n;
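
Note: the sketch below is not part of the patch; it is a minimal userspace analogue of the counter pattern the patch adopts, namely replacing plain integer histogram buckets with atomic ones so concurrent updaters do not lose increments. It uses C11 <stdatomic.h> and POSIX threads as stand-ins for the kernel's atomic_t, atomic_inc(), atomic_dec() and atomic_read(); NCHUNKS_DEMO, the iteration count and the thread count are illustrative values, not taken from the driver.

/* build: gcc -std=c11 -pthread demo.c */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define NCHUNKS_DEMO 16

static atomic_ulong curr_dist[NCHUNKS_DEMO];	/* like zv_curr_dist_counts */
static atomic_ulong cumul_dist[NCHUNKS_DEMO];	/* like zv_cumul_dist_counts */

static void *updater(void *arg)
{
	int chunk = (int)(long)arg % NCHUNKS_DEMO;
	int i;

	for (i = 0; i < 100000; i++) {
		/* analogous to atomic_inc() on create */
		atomic_fetch_add(&curr_dist[chunk], 1);
		atomic_fetch_add(&cumul_dist[chunk], 1);
		/* analogous to atomic_dec() on free */
		atomic_fetch_sub(&curr_dist[chunk], 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, updater, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* curr counts drop back to 0; cumul counts keep every increment,
	 * mirroring how the two sysfs distributions differ in the driver */
	for (i = 0; i < NCHUNKS_DEMO; i++)
		if (atomic_load(&cumul_dist[i]))
			printf("chunk %2ld: curr=%lu cumul=%lu\n", i,
			       atomic_load(&curr_dist[i]),
			       atomic_load(&cumul_dist[i]));
	return 0;
}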