/**
* Contexts usable in cache shrinker thread.
*/
- LCT_SHRINKER = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF
+ LCT_SHRINKER = LCT_MD_THREAD | LCT_DT_THREAD | LCT_CL_THREAD |
+ LCT_NOREF
};
/**
#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take on unlink sort of operations */
-#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
+#define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)
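With MDS_INODELOCK_MAXSHIFT at 5, as defined above, the rewritten macro
still evaluates to 0x3f, i.e. all six inodelock bits set. A minimal
user-space sketch to sanity-check the arithmetic:

#include <stdio.h>

#define MDS_INODELOCK_MAXSHIFT 5
#define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)

int main(void)
{
	/* (1 << 6) - 1 == 0x3f */
	printf("MDS_INODELOCK_FULL = %#x\n", MDS_INODELOCK_FULL);
	return 0;
}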
/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
* but was moved into name[1] along with the OID to avoid consuming the
__u32 llh_cat_idx;
/* for a catalog the first plain slot is next to it */
struct obd_uuid llh_tgtuuid;
- __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
- __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
+ __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
+ __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
struct llog_rec_tail llh_tail;
} __packed;
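The "- 23" in llh_reserved is a padding trick: it subtracts the number of
__u32-sized slots the preceding members already occupy, so the fixed part
of the header comes out at exactly LLOG_HEADER_SIZE. A cut-down sketch of
the same idiom; the struct and both size constants below are illustrative,
not the real llog layout:

#include <stdio.h>
#include <stdint.h>

#define HDR_SIZE 96		/* illustrative, not LLOG_HEADER_SIZE */
#define BITMAP_BYTES 2024	/* illustrative, not LLOG_BITMAP_BYTES */

/* pad the named fields out to HDR_SIZE with reserved words; the "- 4"
 * counts the uint32_t slots that magic, count and timestamp occupy
 */
struct toy_log_hdr {
	uint32_t magic;
	uint32_t count;
	uint64_t timestamp;
	uint32_t reserved[HDR_SIZE / sizeof(uint32_t) - 4];
	uint32_t bitmap[BITMAP_BYTES / sizeof(uint32_t)];
} __attribute__((packed));

int main(void)
{
	/* fixed fields plus padding come out at HDR_SIZE exactly */
	printf("%zu\n", sizeof(struct toy_log_hdr) - BITMAP_BYTES);
	return 0;
}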
/** lustre_capa::lc_opc */
enum {
- CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
- CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
- CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
- CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
- CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
- CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
- CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
- CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
- CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
- CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
- CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
+ CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */
+ CAPA_OPC_BODY_READ = 1 << 1, /**< read object data */
+ CAPA_OPC_INDEX_LOOKUP = 1 << 2, /**< lookup object fid */
+ CAPA_OPC_INDEX_INSERT = 1 << 3, /**< insert object fid */
+ CAPA_OPC_INDEX_DELETE = 1 << 4, /**< delete object fid */
+ CAPA_OPC_OSS_WRITE = 1 << 5, /**< write oss object data */
+ CAPA_OPC_OSS_READ = 1 << 6, /**< read oss object data */
+ CAPA_OPC_OSS_TRUNC = 1 << 7, /**< truncate oss object */
+ CAPA_OPC_OSS_DESTROY = 1 << 8, /**< destroy oss object */
+ CAPA_OPC_META_WRITE = 1 << 9, /**< write object meta data */
+ CAPA_OPC_META_READ = 1 << 10, /**< read object meta data */
};
#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+#define CR_MAXSIZE cfs_size_round(2 * NAME_MAX + 1 + \
sizeof(struct changelog_ext_rec))
struct changelog_rec {
* (from OID), or up to 128M inodes without collisions for new files.
*/
ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
+ (seq >> (64 - (40 - 8)) & 0xffffff00) +
(fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
return ino ? ino : fid_oid(fid);
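One precedence note on the middle term: ">>" binds tighter than "&", so
seq >> (64 - (40 - 8)) & 0xffffff00 parses as (seq >> 32) & 0xffffff00.
A quick stand-alone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t seq = 0xdeadbeefcafef00dULL;

	/* shift binds tighter than bitwise AND, so both forms agree */
	printf("%d\n", (seq >> (64 - (40 - 8)) & 0xffffff00) ==
		       ((seq >> 32) & 0xffffff00));
	return 0;
}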
/* 4UL * 1024 * 1024 */
#define LL_MAX_BLKSIZE_BITS (22)
-#define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS)
+#define LL_MAX_BLKSIZE (1UL << LL_MAX_BLKSIZE_BITS)
/*
* This is embedded into llite super-blocks to keep track of
* running on a backup server. (If it's too low, import_select_connection
* will increase the timeout anyhow.)
*/
-#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20)
+#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout / 20)
/* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */
#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
INITIAL_CONNECT_TIMEOUT)
/* The min time a target should wait for clients to reconnect in recovery */
-#define OBD_RECOVERY_TIME_MIN (2*RECONNECT_DELAY_MAX)
+#define OBD_RECOVERY_TIME_MIN (2 * RECONNECT_DELAY_MAX)
#define OBD_IR_FACTOR_MIN 1
#define OBD_IR_FACTOR_MAX 10
-#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX/2)
+#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX / 2)
/* default timeout for the MGS to become IR_FULL */
-#define OBD_IR_MGS_TIMEOUT (4*obd_timeout)
+#define OBD_IR_MGS_TIMEOUT (4 * obd_timeout)
#define LONG_UNLINK 300 /* Unlink should happen before now */
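To put rough numbers on the chain above: assuming the common defaults of
obd_timeout = 100 and CONNECTION_SWITCH_MIN/MAX/INC = 5/50/5 seconds
(assumptions for this sketch; all of them are tunable), the macros work
out to 5, 60 and 120 seconds respectively:

#include <stdio.h>

/* assumed defaults for illustration; the real values live in
 * obd_support.h and can be tuned at runtime
 */
static unsigned int obd_timeout = 100;
#define CONNECTION_SWITCH_MIN 5U
#define CONNECTION_SWITCH_MAX 50U
#define CONNECTION_SWITCH_INC 5

#define max(a, b) ((a) > (b) ? (a) : (b))

#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout / 20)
#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
			     INITIAL_CONNECT_TIMEOUT)
#define OBD_RECOVERY_TIME_MIN (2 * RECONNECT_DELAY_MAX)

int main(void)
{
	printf("initial=%u delay_max=%u recovery_min=%u\n",
	       INITIAL_CONNECT_TIMEOUT, RECONNECT_DELAY_MAX,
	       OBD_RECOVERY_TIME_MIN);
	return 0;
}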
/**
POISON_PTR(ptr); \
} while (0)
-#define KEY_IS(str) \
- (keylen >= (sizeof(str)-1) && memcmp(key, str, (sizeof(str)-1)) == 0)
+#define KEY_IS(str) \
+ (keylen >= (sizeof(str) - 1) && \
+ memcmp(key, str, (sizeof(str) - 1)) == 0)
#endif
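KEY_IS leans on sizeof of a string literal including its trailing NUL,
hence the two "- 1"s. A stand-alone sketch of the idiom; in the kernel,
key and keylen are parameters of the surrounding function, so the locals
here are purely illustrative:

#include <stdio.h>
#include <string.h>

#define KEY_IS(str) \
	(keylen >= (sizeof(str) - 1) && \
	 memcmp(key, str, (sizeof(str) - 1)) == 0)

int main(void)
{
	const char *key = "read_cache_enable";
	size_t keylen = strlen(key);

	/* sizeof("read_cache") is 11; the - 1 drops the NUL */
	printf("%d %d\n", KEY_IS("read_cache"), KEY_IS("writethrough"));
	return 0;
}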
lock_res_and_lock(lock);
ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
- (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
+ (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK));
ldlm_cancel_callback(lock);
rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
unlock_res_and_lock(lock);
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disappear under us (e.g. due to cancel)
*/
- if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
+ if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) {
list_add(&lock->l_pending_chain, list);
LDLM_LOCK_GET(lock);
}
if (och) {
mode = och->och_flags &
- (FMODE_READ|FMODE_WRITE);
+ (FMODE_READ | FMODE_WRITE);
rc = ll_lease_close(och, inode, &lease_broken);
if (rc == 0 && lease_broken)
mode = 0;
struct lustre_handle lockh;
ldlm_policy_data_t policy;
enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
- (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
+ (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
struct lu_fid *fid;
__u64 flags;
int i;
*flags |= tmp;
goto next;
}
- tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
+ tmp = ll_set_opt("noflock", s1,
+ LL_SBI_FLOCK | LL_SBI_LOCALFLOCK);
if (tmp) {
*flags &= ~tmp;
goto next;
{
int mode = d_inode(de)->i_mode;
- if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
- (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
+ if ((attr->ia_valid & (ATTR_CTIME | ATTR_SIZE | ATTR_MODE)) ==
+ (ATTR_CTIME | ATTR_SIZE | ATTR_MODE))
attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
- (ATTR_SIZE|ATTR_MODE)) &&
+ if (((attr->ia_valid & (ATTR_MODE | ATTR_FORCE | ATTR_SIZE)) ==
+ (ATTR_SIZE | ATTR_MODE)) &&
(((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
- (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ (((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
!(attr->ia_mode & S_ISGID))))
attr->ia_valid |= ATTR_FORCE;
attr->ia_valid |= ATTR_KILL_SUID;
if ((attr->ia_valid & ATTR_MODE) &&
- ((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
!(attr->ia_mode & S_ISGID) &&
!(attr->ia_valid & ATTR_KILL_SGID))
attr->ia_valid |= ATTR_KILL_SGID;
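The (mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) test recurs above
because S_ISGID without S_IXGRP marks mandatory locking rather than a
genuine setgid file, so only the combination should trigger the override
or the kill. A condensed truth-table sketch of that predicate (the helper
below folds out the ATTR_* plumbing and is not the kernel code):

#include <stdio.h>
#include <sys/stat.h>

/* setgid is only "real" when group-execute is also set; S_ISGID alone
 * denotes mandatory locking
 */
static int sgid_would_be_killed(int mode, int new_mode)
{
	return ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) &&
	       !(new_mode & S_ISGID);
}

int main(void)
{
	printf("%d\n", sgid_would_be_killed(S_ISGID | S_IXGRP | 0644, 0644));
	printf("%d\n", sgid_would_be_killed(S_ISGID | 0644, 0644));
	return 0;
}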
lli->lli_ctime = body->mbo_ctime;
}
if (body->mbo_valid & OBD_MD_FLMODE)
- inode->i_mode = (inode->i_mode & S_IFMT)|(body->mbo_mode & ~S_IFMT);
+ inode->i_mode = (inode->i_mode & S_IFMT) |
+ (body->mbo_mode & ~S_IFMT);
if (body->mbo_valid & OBD_MD_FLTYPE)
- inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mbo_mode & S_IFMT);
+ inode->i_mode = (inode->i_mode & ~S_IFMT) |
+ (body->mbo_mode & S_IFMT);
LASSERT(inode->i_mode != 0);
if (S_ISREG(inode->i_mode))
inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1,
fio = &io->u.ci_fault;
fio->ft_index = index;
- fio->ft_executable = vma->vm_flags&VM_EXEC;
+ fio->ft_executable = vma->vm_flags & VM_EXEC;
/*
 * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
 * the kernel will not read other pages not covered by ldlm in
 * filemap_nopage. we do our readahead in ll_readpage.
*/
if (ra_flags)
- *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
+ *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
vma->vm_flags &= ~VM_SEQ_READ;
vma->vm_flags |= VM_RAND_READ;
/* file operation */
{ LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
{ LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
- { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"read_bytes" },
- { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"write_bytes" },
- { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
+ { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
"brw_read" },
- { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
+ { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
"brw_write" },
- { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"osc_read" },
- { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
"osc_write" },
{ LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
{ LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
r, pct(r, read_tot), pct(read_cum, read_tot),
w, pct(w, write_tot), pct(write_cum, write_tot));
start = end;
- if (start == 1<<10) {
+ if (start == 1 << 10) {
start = 1;
units += 10;
unitp++;
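The rollover above is what turns raw power-of-two bucket bounds into
K/M/G labels: every ten doublings, start resets to 1 and the unit pointer
advances (the kernel also keeps a running shift in units += 10). A
stand-alone mirror of the loop; the label table is an assumption,
reconstructed for illustration:

#include <stdio.h>

int main(void)
{
	const char *unitp = "\0KMGTPEZY";	/* assumed label table */
	unsigned int start = 1;
	int i;

	/* 1 2 4 ... 512, then 1K 2K ... 512K, then 1M 2M 4M 8M */
	for (i = 0; i < 24; i++) {
		printf("%u%c ", start, *unitp ? *unitp : ' ');
		start <<= 1;
		if (start == 1 << 10) {
			start = 1;
			unitp++;
		}
	}
	printf("\n");
	return 0;
}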
(inode_permission(parent, MAY_WRITE | MAY_EXEC) == 0))
return NULL;
- if (flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE))
+ if (flags & (LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE))
itp = NULL;
else
itp = &it;
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
- mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
+ mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
err = ll_new_node(dir, dentry, NULL, mode, 0, LUSTRE_OPC_MKDIR);
if (!err)
if (!stride_io_mode(ras) && (stride_gap != 0 ||
ras->ras_consecutive_stride_requests == 0)) {
ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages;
+ ras->ras_stride_length = ras->ras_consecutive_pages +
+ stride_gap;
}
LASSERT(ras->ras_request_index == 0);
LASSERT(ras->ras_consecutive_stride_requests == 0);
}
ras->ras_stride_pages = ras->ras_consecutive_pages;
- ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages;
+ ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
RAS_CDEBUG(ras);
return;
return result;
}
-#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
+#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)
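A side note on MAX_DIRECTIO_SIZE: the UL suffix on the final factor is
load-bearing. Multiplication associates left to right, so the first two
products stay in int range and the last step is performed in unsigned
long; written as all-int, 2 * 1024 * 1024 * 1024 would be signed overflow
where int is 32 bits. A quick demonstration:

#include <stdio.h>

int main(void)
{
	/* the last factor promotes the whole product to unsigned long */
	unsigned long max_dio = 2 * 1024 * 1024 * 1024UL;

	printf("%lu\n", max_dio);	/* 2147483648 */
	return 0;
}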
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
size_t size, struct page ***pages,
rc = -ENOMEM;
ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
- sizeof(struct ll_inode_info),
- 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
+ sizeof(struct ll_inode_info), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
NULL);
if (!ll_inode_cachep)
goto out_cache;
if (0 && valid & CAT_SIZE)
i_size_write(inode, attr->cat_size);
/* not currently necessary */
- if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
+ if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE))
mark_inode_dirty(inode);
return 0;
}
}
if (lmm->lmm_stripe_size == 0 ||
- (le32_to_cpu(lmm->lmm_stripe_size)&(LOV_MIN_STRIPE_SIZE-1)) != 0) {
+ (le32_to_cpu(lmm->lmm_stripe_size) &
+ (LOV_MIN_STRIPE_SIZE - 1)) != 0) {
CERROR("bad stripe size %u\n",
le32_to_cpu(lmm->lmm_stripe_size));
lov_dump_lmm_common(D_WARNING, lmm);
break;
}
- len_mapped_single_call = lcl_fm_ext[ext_count-1].fe_logical -
- lun_start + lcl_fm_ext[ext_count - 1].fe_length;
+ len_mapped_single_call =
+ lcl_fm_ext[ext_count - 1].fe_logical -
+ lun_start + lcl_fm_ext[ext_count - 1].fe_length;
/* Have we finished mapping on this device? */
if (req_fm_len <= len_mapped_single_call)
/* Clear the EXTENT_LAST flag which can be present on
* last extent
*/
- if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
+ if (lcl_fm_ext[ext_count - 1].fe_flags &
+ FIEMAP_EXTENT_LAST)
lcl_fm_ext[ext_count - 1].fe_flags &=
~FIEMAP_EXTENT_LAST;
curr_loc = lov_stripe_size(lsm,
- lcl_fm_ext[ext_count - 1].fe_logical+
- lcl_fm_ext[ext_count - 1].fe_length,
- cur_stripe);
+ lcl_fm_ext[ext_count - 1].fe_logical +
+ lcl_fm_ext[ext_count - 1].fe_length,
+ cur_stripe);
if (curr_loc >= fm_key->oa.o_size)
ost_eof = 1;
for (i = 0; i < LOV_MAXPOOLNAME; i++) {
if (poolname[i] == '\0')
break;
- result = (result << 4)^(result >> 28) ^ poolname[i];
+ result = (result << 4) ^ (result >> 28) ^ poolname[i];
}
return (result % mask);
}
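The function above is a plain shift-xor fold over the pool name. Lifted
out of the kernel it runs stand-alone; the 32-bit accumulator matches the
original, while LOV_MAXPOOLNAME's value and the demo mask are assumptions
for this sketch:

#include <stdio.h>

#define LOV_MAXPOOLNAME 15	/* assumed bound for this sketch */

/* shift-xor fold: mix each byte into a rolling 32-bit accumulator */
static unsigned int pool_hash(const char *poolname, unsigned int mask)
{
	unsigned int result = 0;
	int i;

	for (i = 0; i < LOV_MAXPOOLNAME; i++) {
		if (poolname[i] == '\0')
			break;
		result = (result << 4) ^ (result >> 28) ^ poolname[i];
	}
	return result % mask;
}

int main(void)
{
	/* nearby names should still scatter across buckets */
	printf("%u %u\n", pool_hash("pool1", 128), pool_hash("pool2", 128));
	return 0;
}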
else
mode = LCK_PR;
} else {
- if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
+ if (it->it_flags & (FMODE_WRITE | MDS_OPEN_TRUNC))
mode = LCK_CW;
else if (it->it_flags & __FMODE_EXEC)
mode = LCK_PR;
__u64 bits;
bits = MDS_INODELOCK_UPDATE;
- if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
+ if (op_data->op_attr.ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
bits |= MDS_INODELOCK_LOOKUP;
if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
(fid_is_sane(&op_data->op_fid1)) &&
}
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, oldlen + 1);
- req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT, newlen+1);
+ req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT,
+ newlen + 1);
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
LASSERT(addr);
put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr+LPDS);
+ put_unaligned_le64(id, addr + LPDS);
addr += len - LPDS - LPDS;
put_unaligned_le64(off, addr);
- put_unaligned_le64(id, addr+LPDS);
+ put_unaligned_le64(id, addr + LPDS);
return 0;
}
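The hunk above stamps a data block with its offset and object id (LPDS
bytes each) at both the start and the end, so later verification can
catch torn or misplaced writes. A stand-alone sketch of the stamping
scheme; put_le64() stands in for the kernel's put_unaligned_le64(), and
the verify step is reconstructed for illustration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define LPDS sizeof(uint64_t)

/* byte-wise little-endian store, like put_unaligned_le64() */
static void put_le64(uint64_t v, unsigned char *p)
{
	size_t i;

	for (i = 0; i < sizeof(v); i++)
		p[i] = (unsigned char)(v >> (8 * i));
}

/* write (off, id) at the start and again at the end of the block */
static void stamp_block(unsigned char *addr, size_t len,
			uint64_t off, uint64_t id)
{
	put_le64(off, addr);
	put_le64(id, addr + LPDS);
	addr += len - LPDS - LPDS;
	put_le64(off, addr);
	put_le64(id, addr + LPDS);
}

int main(void)
{
	unsigned char page[4096];

	memset(page, 0, sizeof(page));
	stamp_block(page, sizeof(page), 12345, 678);
	/* both stamps agree unless something scribbled on the block */
	printf("%d\n", memcmp(page, page + sizeof(page) - 2 * LPDS,
			      2 * LPDS) == 0);
	return 0;
}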
continue;
if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
if (next)
- *next = i+1;
+ *next = i + 1;
read_unlock(&obd_dev_lock);
return obd;
}
if (rc < 0)
return rc;
- ptr += snprintf(ptr, end-ptr, "cmd=%05x ", lcfg->lcfg_command);
+ ptr += snprintf(ptr, end - ptr, "cmd=%05x ", lcfg->lcfg_command);
if (lcfg->lcfg_flags)
- ptr += snprintf(ptr, end-ptr, "flags=%#08x ",
+ ptr += snprintf(ptr, end - ptr, "flags=%#08x ",
lcfg->lcfg_flags);
if (lcfg->lcfg_num)
- ptr += snprintf(ptr, end-ptr, "num=%#08x ", lcfg->lcfg_num);
+ ptr += snprintf(ptr, end - ptr, "num=%#08x ", lcfg->lcfg_num);
if (lcfg->lcfg_nid) {
char nidstr[LNET_NIDSTR_SIZE];
libcfs_nid2str_r(lcfg->lcfg_nid, nidstr, sizeof(nidstr));
- ptr += snprintf(ptr, end-ptr, "nid=%s(%#llx)\n ",
+ ptr += snprintf(ptr, end - ptr, "nid=%s(%#llx)\n ",
nidstr, lcfg->lcfg_nid);
}
if (lcfg->lcfg_command == LCFG_MARKER) {
struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
- ptr += snprintf(ptr, end-ptr, "marker=%d(%#x)%s '%s'",
+ ptr += snprintf(ptr, end - ptr, "marker=%d(%#x)%s '%s'",
marker->cm_step, marker->cm_flags,
marker->cm_tgtname, marker->cm_comment);
} else {
int i;
for (i = 0; i < lcfg->lcfg_bufcount; i++) {
- ptr += snprintf(ptr, end-ptr, "%d:%s ", i,
+ ptr += snprintf(ptr, end - ptr, "%d:%s ", i,
lustre_cfg_string(lcfg, i));
}
}
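All of these lines use the ptr += snprintf(ptr, end - ptr, ...)
accumulation idiom. One caveat worth spelling out: snprintf returns the
length it would have written, so after truncation ptr can land past end,
and a further end - ptr underflows when converted to size_t. A small
illustration:

#include <stdio.h>

int main(void)
{
	char buf[16];
	char *ptr = buf;
	char *end = buf + sizeof(buf);

	/* each call appends at ptr with whatever space is left */
	ptr += snprintf(ptr, end - ptr, "cmd=%05x ", 0xcf);
	ptr += snprintf(ptr, end - ptr, "flags=%#08x ", 0x1);

	/* 25 > 16: the second call was truncated but still returned
	 * its full length, so ptr now points past end
	 */
	printf("used=%td of %zu\n", ptr - buf, sizeof(buf));
	return 0;
}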
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#define D_MOUNT (D_SUPER|D_CONFIG/*|D_WARNING */)
+#define D_MOUNT (D_SUPER | D_CONFIG/*|D_WARNING */)
#define PRINT_CMD CDEBUG
#include "../include/obd.h"
exclude_list[lmd->lmd_exclude_count++] = index;
else
CDEBUG(D_MOUNT, "ignoring exclude %.*s: type = %#x\n",
- (uint)(s2-s1), s1, rc);
+ (uint)(s2 - s1), s1, rc);
s1 = s2;
/* now we are pointing at ':' (next exclude)
* or ',' (end of excludes)
struct lustre_sb_info *lsi;
int rc;
- CDEBUG(D_MOUNT|D_VFSTRACE, "VFS Op: sb %p\n", sb);
+ CDEBUG(D_MOUNT | D_VFSTRACE, "VFS Op: sb %p\n", sb);
lsi = lustre_init_lsi(sb);
if (!lsi)
}
#if 0 /* you shouldn't be able to change a file's type with setattr */
if (valid & OBD_MD_FLTYPE) {
- attr->ia_mode = (attr->ia_mode & ~S_IFMT)|(oa->o_mode & S_IFMT);
+ attr->ia_mode = (attr->ia_mode & ~S_IFMT) |
+ (oa->o_mode & S_IFMT);
attr->ia_valid |= ATTR_MODE;
}
#endif
if (valid & OBD_MD_FLMODE) {
- attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
+ attr->ia_mode = (attr->ia_mode & S_IFMT) |
+ (oa->o_mode & ~S_IFMT);
attr->ia_valid |= ATTR_MODE;
if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
!capable(CFS_CAP_FSETID))
/* The persistent object (i.e. actually stores stuff!) */
#define ECHO_PERSISTENT_OBJID 1ULL
-#define ECHO_PERSISTENT_SIZE ((__u64)(1<<20))
+#define ECHO_PERSISTENT_SIZE ((__u64)(1 << 20))
/* block size to use for data verification */
-#define OBD_ECHO_BLOCK_SIZE (4<<10)
+#define OBD_ECHO_BLOCK_SIZE (4 << 10)
#endif
goto out;
spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
+ oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT;
spin_unlock(&oap->oap_lock);
if (memory_pressure_get())
extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;
-#define OSC_FLAGS (ASYNC_URGENT|ASYNC_READY)
+#define OSC_FLAGS (ASYNC_URGENT | ASYNC_READY)
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
}
spin_lock(&oap->oap_lock);
- oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
+ oap->oap_async_flags = ASYNC_URGENT | ASYNC_READY;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&oap->oap_lock);
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
long writing_bytes)
{
- u32 bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
+ u32 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;
LASSERT(!(oa->o_valid & bits));
oa->o_undirty = 0;
} else {
long max_in_flight = (cli->cl_max_pages_per_rpc <<
- PAGE_SHIFT)*
+ PAGE_SHIFT) *
(cli->cl_max_rpcs_in_flight + 1);
oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_SHIFT,
max_in_flight);
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
POSTID(&oa->o_oi), pga[0]->off,
- pga[page_count-1]->off + pga[page_count-1]->count - 1);
+ pga[page_count - 1]->off +
+ pga[page_count - 1]->count - 1);
CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
client_cksum, client_cksum_type,
server_cksum, cksum_type, new_cksum);
char *router = "";
enum cksum_type cksum_type;
- cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ?
+ cksum_type = cksum_type_unpack(body->oa.o_valid &
+ OBD_MD_FLFLAGS ?
body->oa.o_flags : 0);
client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
aa->aa_ppga, OST_READ,
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
crattr->cra_oa = &body->oa;
cl_req_attr_set(env, clerq, crattr,
- OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
*/
lwi = LWI_TIMEOUT_INTERVAL(
cfs_timeout_cap(cfs_time_seconds(timeout)),
- (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
+ (timeout > 1) ? cfs_time_seconds(1) :
+ cfs_time_seconds(1) / 2,
NULL, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
(atomic_read(&imp->imp_inflight) == 0),
request->rq_send_state = LUSTRE_IMP_CONNECTING;
/* Allow a slightly larger reply for future growth compatibility */
req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
- sizeof(struct obd_connect_data)+16*sizeof(__u64));
+ sizeof(struct obd_connect_data) +
+ 16 * sizeof(__u64));
ptlrpc_request_set_replen(request);
request->rq_interpret_reply = ptlrpc_connect_interpret;
LASSERT(!*debugfs_root_ret);
LASSERT(!*stats_ret);
- svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,
+ svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES,
0);
if (!svc_stats)
return;
* an argument, describing its "scope".
*/
rc = lu_context_init(&env.le_ctx,
- LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
+ LCT_CL_THREAD | LCT_REMEMBER | LCT_NOREF);
if (rc == 0) {
rc = lu_context_init(env.le_ses,
LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
* ptlrpcd thread (or a thread-set) has to be given an argument,
* describing its "scope".
*/
- rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
+ rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD | LCT_REMEMBER);
if (rc != 0)
goto out;
}
rc = lu_context_init(&env->le_ctx,
- svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
+ svc->srv_ctx_tags | LCT_REMEMBER | LCT_NOREF);
if (rc)
goto out_srv_fini;