 {
 	struct lov_thread_info *info;
-	info = kmem_cache_alloc(lov_thread_kmem, GFP_NOFS | __GFP_ZERO);
+	info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS);
 	if (info)
 		INIT_LIST_HEAD(&info->lti_closure.clc_list);
 	else

 {
 	struct lov_session *info;
-	info = kmem_cache_alloc(lov_session_kmem, GFP_NOFS | __GFP_ZERO);
+	info = kmem_cache_zalloc(lov_session_kmem, GFP_NOFS);
 	if (!info)
 		info = ERR_PTR(-ENOMEM);
 	return info;

 	struct lov_req *lr;
 	int result;
-	lr = kmem_cache_alloc(lov_req_kmem, GFP_NOFS | __GFP_ZERO);
+	lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS);
 	if (lr) {
 		cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
 		result = 0;

 		return NULL;
 	for (i = 0; i < stripe_count; i++) {
-		loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO);
+		loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS);
 		if (!loi)
 			goto err;
 		lsm->lsm_oinfo[i] = loi;

 	LASSERT(idx < lck->lls_nr);
-	link = kmem_cache_alloc(lov_lock_link_kmem, GFP_NOFS | __GFP_ZERO);
+	link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS);
 	if (link) {
 		struct lov_sublock_env *subenv;
 		struct lov_lock_sub *lls;

 	struct lov_lock *lck;
 	int result;
-	lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO);
+	lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
 	if (lck) {
 		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
 		result = lov_lock_sub_init(env, lck, io);

 	struct lov_lock *lck;
 	int result = -ENOMEM;
-	lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO);
+	lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
 	if (lck) {
 		cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
 		lck->lls_orig = lock->cll_descr;

 	struct lov_object *lov;
 	struct lu_object *obj;
-	lov = kmem_cache_alloc(lov_object_kmem, GFP_NOFS | __GFP_ZERO);
+	lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS);
 	if (lov) {
 		obj = lov2lu(lov);
 		lu_object_init(obj, NULL, dev);

 	if (!atomic_read(&set->set_success))
 		return -EIO;
-	tmp_oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
+	tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
 	if (!tmp_oa) {
 		rc = -ENOMEM;
 		goto out;

 		req->rq_stripe = i;
 		req->rq_idx = loi->loi_ost_idx;
-		req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
-						    GFP_NOFS | __GFP_ZERO);
+		req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
 		if (!req->rq_oi.oi_oa) {
 			kfree(req);
 			rc = -ENOMEM;

 		req->rq_stripe = i;
 		req->rq_idx = loi->loi_ost_idx;
-		req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
-						    GFP_NOFS | __GFP_ZERO);
+		req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
 		if (!req->rq_oi.oi_oa) {
 			kfree(req);
 			rc = -ENOMEM;

 		req->rq_stripe = i;
 		req->rq_idx = loi->loi_ost_idx;
-		req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
-						    GFP_NOFS | __GFP_ZERO);
+		req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
 		if (!req->rq_oi.oi_oa) {
 			kfree(req);
 			rc = -ENOMEM;

 	struct lovsub_req *lsr;
 	int result;
-	lsr = kmem_cache_alloc(lovsub_req_kmem, GFP_NOFS | __GFP_ZERO);
+	lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS);
 	if (lsr) {
 		cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
 		result = 0;

 	struct lovsub_lock *lsk;
 	int result;
-	lsk = kmem_cache_alloc(lovsub_lock_kmem, GFP_NOFS | __GFP_ZERO);
+	lsk = kmem_cache_zalloc(lovsub_lock_kmem, GFP_NOFS);
 	if (lsk) {
 		INIT_LIST_HEAD(&lsk->lss_parents);
 		cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);

 	struct lovsub_object *los;
 	struct lu_object *obj;
-	los = kmem_cache_alloc(lovsub_object_kmem, GFP_NOFS | __GFP_ZERO);
+	los = kmem_cache_zalloc(lovsub_object_kmem, GFP_NOFS);
 	if (los) {
 		struct cl_object_header *hdr;
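
The conversion is purely mechanical: kmem_cache_zalloc(cachep, flags) is defined in <linux/slab.h> as kmem_cache_alloc(cachep, flags | __GFP_ZERO), so every call site above keeps its GFP_NOFS semantics and still receives zeroed memory, just with less boilerplate. Below is a minimal before/after sketch of the pattern; struct demo_item and demo_cache are hypothetical names used only for illustration and are not part of the LOV code touched by this patch.

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical example cache, for illustration only. */
struct demo_item {
	int di_refs;
};

static struct kmem_cache *demo_cache;

static int demo_cache_setup(void)
{
	demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_item),
				       0, 0, NULL);
	return demo_cache ? 0 : -ENOMEM;
}

/* Old pattern: ask for zeroed memory by OR-ing in __GFP_ZERO. */
static struct demo_item *demo_alloc_old(void)
{
	return kmem_cache_alloc(demo_cache, GFP_NOFS | __GFP_ZERO);
}

/* New pattern: kmem_cache_zalloc() adds __GFP_ZERO internally. */
static struct demo_item *demo_alloc_new(void)
{
	return kmem_cache_zalloc(demo_cache, GFP_NOFS);
}

Both helpers return equivalently zeroed objects from the same cache; the zalloc form is simply the preferred kernel idiom because it makes the zeroing obvious at the call site.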