}
/* Find minimum stripe maxbytes value. For inactive or
- * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */
+ * reconnecting targets use LUSTRE_STRIPE_MAXBYTES.
+ */
static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
{
struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
/* lov_do_div64(a, b) returns a % b, and a = a / b.
* The 32-bit code is LOV-specific due to knowing about stripe limits in
* order to reduce the divisor to a 32-bit number. If the divisor is
- * already a 32-bit value the compiler handles this directly. */
+ * already a 32-bit value the compiler handles this directly.
+ */
#if BITS_PER_LONG == 64
# define lov_do_div64(n, base) ({ \
uint64_t __base = (base); \
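/* Editorial sketch, not part of this patch: the comment above describes
 * do_div()-style semantics - the remainder is returned and the quotient is
 * written back into the dividend. A hypothetical caller (names invented
 * for illustration) might look like:
 */
static inline u64 lov_do_div64_example(void)
{
	u64 nbytes = 1000;			/* dividend, updated in place */
	u64 rem = lov_do_div64(nbytes, 300);	/* nbytes becomes 3, rem is 100 */

	return rem;
}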
atomic_t set_refcount;
struct obd_export *set_exp;
/* XXX: There is @set_exp already, however obd_statfs gets obd_device
- only. */
+ * only.
+ */
struct obd_device *set_obd;
int set_count;
atomic_t set_completes;
* to remember the subio. This is because lock is able
* to be cached, but this is not true for IO. This
* further means a sublock might be referenced in
- * different io context. -jay */
+ * different io context. -jay
+ */
sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
descr, "lov-parent", parent);
result = cl_enqueue_try(env, sublock, io, enqflags);
if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
/* if it is enqueued, try to `wait' on it---maybe it's already
- * granted */
+ * granted
+ */
result = cl_wait_try(env, sublock);
if (result == CLO_REENQUEUED)
result = CLO_WAIT;
} else {
kmem_cache_free(lov_lock_link_kmem, link);
/* other thread allocated sub-lock, or enqueue is no
- * longer going on */
+ * longer going on
+ */
cl_lock_mutex_put(env, parent);
cl_lock_unhold(env, sublock, "lov-parent", parent);
cl_lock_mutex_get(env, parent);
if (!sub) {
result = lov_sublock_fill(env, lock, io, lck, i);
/* lov_sublock_fill() released @lock mutex,
- * restart. */
+ * restart.
+ */
break;
}
sublock = sub->lss_cl.cls_lock;
/* take recursive mutex of sublock */
cl_lock_mutex_get(env, sublock);
/* need to release all locks in closure
- * otherwise it may deadlock. LU-2683.*/
+ * otherwise it may deadlock. LU-2683.
+ */
lov_sublock_unlock(env, sub, closure,
subenv);
/* sublock and parent are held. */
/* top-lock state cannot change concurrently, because single
* thread (one that released the last hold) carries unlocking
- * to the completion. */
+ * to the completion.
+ */
LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
lls = &lck->lls_sub[i];
sub = lls->sub_lock;
/* top-lock state cannot change concurrently, because single
* thread (one that released the last hold) carries unlocking
- * to the completion. */
+ * to the completion.
+ */
lls = &lck->lls_sub[i];
sub = lls->sub_lock;
if (!sub)
if (result != 0)
break;
}
- /* Each sublock only can be reenqueued once, so will not loop for
- * ever. */
+ /* Each sublock can only be reenqueued once, so will not loop
+ * forever.
+ */
if (result == 0 && reenqueued != 0)
goto again;
cl_lock_closure_fini(closure);
i, 1, rc);
} else if (sublock->cll_state == CLS_NEW) {
/* Sub-lock might have been canceled, while
- * top-lock was cached. */
+ * top-lock was cached.
+ */
result = -ESTALE;
lov_sublock_release(env, lck, i, 1, result);
}
LASSERT(lov->lls_nr > 0);
/* for top lock, it's necessary to match enq flags otherwise it will
- * run into problem if a sublock is missing and reenqueue. */
+ * run into problems if a sublock is missing and is reenqueued.
+ */
if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
return 0;
#include "lov_internal.h"
/* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion.
- Any function that expects lov_tgts to remain stationary must take a ref. */
+ * Any function that expects lov_tgts to remain stationary must take a ref.
+ */
static void lov_getref(struct obd_device *obd)
{
struct lov_obd *lov = &obd->u.lov;
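/* Editorial sketch, not part of this patch: the pairing the comment above
 * asks for - pin lov->lov_tgts[] with obd_getref() (used later in this file)
 * before walking it, and drop the reference when done. The helper name is
 * invented for illustration:
 */
static void lov_walk_tgts_example(struct obd_device *obd)
{
	struct lov_obd *lov = &obd->u.lov;
	int i;

	obd_getref(obd);			/* keep lov_tgts stationary */
	for (i = 0; i < lov->desc.ld_tgt_count; i++) {
		if (!lov->lov_tgts[i])		/* deleted slots stay NULL */
			continue;
		/* ... use lov->lov_tgts[i] safely here ... */
	}
	obd_putref(obd);			/* addition/deletion may proceed */
}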
list_add(&tgt->ltd_kill, &kill);
/* XXX - right now there is a dependency on ld_tgt_count
* being the maximum tgt index for computing the
- * mds_max_easize. So we can't shrink it. */
+ * mds_max_easize. So we can't shrink it.
+ */
lov_ost_pool_remove(&lov->lov_packed, i);
lov->lov_tgts[i] = NULL;
lov->lov_death_row--;
if (activate) {
tgt_obd->obd_no_recov = 0;
/* FIXME this is probably supposed to be
- ptlrpc_set_import_active. Horrible naming. */
+ * ptlrpc_set_import_active. Horrible naming.
+ */
ptlrpc_activate_import(imp);
}
}
/* Let's hold another reference so lov_del_obd doesn't spin through
- putref every time */
+ * putref every time
+ */
obd_getref(obd);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
continue;
/* don't send sync event if target not
- * connected/activated */
+ * connected/activated
+ */
if (is_sync && !lov->lov_tgts[i]->ltd_active)
continue;
if (lov->lov_connects == 0) {
/* lov_connect hasn't been called yet. We'll do the
- lov_connect_obd on this target when that fn first runs,
- because we don't know the connect flags yet. */
+ * lov_connect_obd on this target when that fn first runs,
+ * because we don't know the connect flags yet.
+ */
return 0;
}
kfree(tgt);
/* Manual cleanup - no cleanup logs to clean up the osc's. We must
- do it ourselves. And we can't do it from lov_cleanup,
- because we just lost our only reference to it. */
+ * do it ourselves. And we can't do it from lov_cleanup,
+ * because we just lost our only reference to it.
+ */
if (osc_obd)
class_manual_cleanup(osc_obd);
}
/* free pool structs */
CDEBUG(D_INFO, "delete pool %p\n", pool);
/* In the function below, .hs_keycmp resolves to
- * pool_hashkey_keycmp() */
+ * pool_hashkey_keycmp()
+ */
/* coverity[overrun-buffer-val] */
lov_pool_del(obd, pool->pool_name);
}
if (lov->lov_tgts[i]->ltd_active ||
atomic_read(&lov->lov_refcount))
/* We should never get here - these
- should have been removed in the
- disconnect. */
+ * should have been removed in the
+ * disconnect.
+ */
CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
i, lov->lov_death_row,
atomic_read(&lov->lov_refcount));
}
/* If @oti is given, the request goes from MDS and responses from OSTs are not
- needed. Otherwise, a client is waiting for responses. */
+ * needed. Otherwise, a client is waiting for responses.
+ */
static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti,
struct ptlrpc_request_set *rqset)
/* find any ldlm lock of the inode in lov
* return 0 not find
* 1 find one
- * < 0 error */
+ * < 0 error
+ */
static int lov_find_cbdata(struct obd_export *exp,
struct lov_stripe_md *lsm, ldlm_iterator_t it,
void *data)
int rc = 0;
/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
- * statfs requests */
+ * statfs requests
+ */
set = ptlrpc_prep_set();
if (!set)
return -ENOMEM;
continue;
/* ll_umount_begin() sets force flag but for lov, not
- * osc. Let's pass it through */
+ * osc. Let's pass it through
+ */
osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp);
osc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
return -EINVAL;
/* If we have finished mapping on previous device, shift logical
- * offset to start of next device */
+ * offset to start of next device
+ */
if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
&lun_start, &lun_end)) != 0 &&
local_end < lun_end) {
*start_stripe = stripe_no;
} else {
/* This is a special value to indicate that caller should
- * calculate offset in next stripe. */
+ * calculate offset in next stripe.
+ */
fm_end_offset = 0;
*start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
}
/* If this is a continuation FIEMAP call and we are on
* starting stripe then lun_start needs to be set to
- * fm_end_offset */
+ * fm_end_offset
+ */
if (fm_end_offset != 0 && cur_stripe == start_stripe)
lun_start = fm_end_offset;
len_mapped_single_call = 0;
/* If the output buffer is very large and the objects have many
- * extents we may need to loop on a single OST repeatedly */
+ * extents we may need to loop on a single OST repeatedly
+ */
ost_eof = 0;
ost_done = 0;
do {
if (ext_count == 0) {
ost_done = 1;
/* If last stripe has hole at the end,
- * then we need to return */
+ * then we need to return
+ */
if (cur_stripe_wrap == last_stripe) {
fiemap->fm_mapped_extents = 0;
goto finish;
ost_done = 1;
/* Clear the EXTENT_LAST flag which can be present on
- * last extent */
+ * last extent
+ */
if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
lcl_fm_ext[ext_count - 1].fe_flags &=
~FIEMAP_EXTENT_LAST;
finish:
/* Indicate that we are returning device offsets unless file just has
- * single stripe */
+ * single stripe
+ */
if (lsm->lsm_stripe_count > 1)
fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
goto skip_last_device_calc;
/* Check if we have reached the last stripe and whether mapping for that
- * stripe is done. */
+ * stripe is done.
+ */
if (cur_stripe_wrap == last_stripe) {
if (ost_done || ost_eof)
fiemap->fm_extents[current_extent - 1].fe_flags |=
/* XXX This is another one of those bits that will need to
* change if we ever actually support nested LOVs. It uses
- * the lock's export to find out which stripe it is. */
+ * the lock's export to find out which stripe it is.
+ */
/* XXX - it's assumed all the locks for deleted OSTs have
* been cancelled. Also, the export for deleted OSTs will
- * be NULL and won't match the lock's export. */
+ * be NULL and won't match the lock's export.
+ */
for (i = 0; i < lsm->lsm_stripe_count; i++) {
loi = lsm->lsm_oinfo[i];
if (lov_oinfo_is_dummy(loi))
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
- * symbols from modules.*/
+ * symbols from modules.
+ */
CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches);
rc = lu_kmem_init(lov_caches);
* Do not leave the object in cache to avoid accessing
* freed memory. This is because osc_object is referring to
* lov_oinfo of lsm_stripe_data which will be freed due to
- * this failure. */
+ * this failure.
+ */
cl_object_kill(env, stripe);
cl_object_put(env, stripe);
return -EIO;
old_lov = cl2lov(lu2cl(old_obj));
if (old_lov->lo_layout_invalid) {
/* the object's layout has already changed but isn't
- * refreshed */
+ * refreshed
+ */
lu_object_unhash(env, &stripe->co_lu);
result = -EAGAIN;
} else {
subconf->u.coc_oinfo = oinfo;
LASSERTF(subdev, "not init ost %d\n", ost_idx);
/* In the function below, .hs_keycmp resolves to
- * lu_obj_hop_keycmp() */
+ * lu_obj_hop_keycmp()
+ */
/* coverity[overrun-buffer-val] */
stripe = lov_sub_find(env, subdev, ofid, subconf);
if (!IS_ERR(stripe)) {
cl_object_put(env, sub);
/* ... wait until it is actually destroyed---sub-object clears its
- * ->lo_sub[] slot in lovsub_object_fini() */
+ * ->lo_sub[] slot in lovsub_object_fini()
+ */
if (r0->lo_sub[idx] == los) {
waiter = &lov_env_info(env)->lti_waiter;
init_waitqueue_entry(waiter, current);
set_current_state(TASK_UNINTERRUPTIBLE);
while (1) {
/* this wait-queue is signaled at the end of
- * lu_object_free(). */
+ * lu_object_free().
+ */
set_current_state(TASK_UNINTERRUPTIBLE);
spin_lock(&r0->lo_sub_lock);
if (r0->lo_sub[idx] == los) {
* context, and this function is called in ccc_lock_state(), it will
* hit this assertion.
* Anyway, it's still okay to call attr_get w/o type guard as layout
- * can't go if locks exist. */
+ * can't go if locks exist.
+ */
/* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
if (!r0->lo_attr_valid) {
memset(lvb, 0, sizeof(*lvb));
/* XXX: timestamps can be negative by sanity:test_39m,
- * how can it be? */
+ * how can it be?
+ */
lvb->lvb_atime = LLONG_MIN;
lvb->lvb_ctime = LLONG_MIN;
lvb->lvb_mtime = LLONG_MIN;
struct cl_attr *attr)
{
/* do not take lock, as this function is called under a
- * spin-lock. Layout is protected from changing by ongoing IO. */
+ * spin-lock. Layout is protected from changing by ongoing IO.
+ */
return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
}
* this function returns < 0 when the offset was "before" the stripe and
* was moved forward to the start of the stripe in question; 0 when it
* falls in the stripe and no shifting was done; > 0 when the offset
- * was outside the stripe and was pulled back to its final byte. */
+ * was outside the stripe and was pulled back to its final byte.
+ */
int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
int stripeno, u64 *obdoff)
{
/* given an extent in an lov and a stripe, calculate the extent of the stripe
* that is contained within the lov extent. this returns true if the given
- * stripe does intersect with the lov extent. */
+ * stripe does intersect with the lov extent.
+ */
int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
u64 start, u64 end, u64 *obd_start, u64 *obd_end)
{
/* this stripe doesn't intersect the file extent when neither
* start or the end intersected the stripe and obd_start and
- * obd_end got rounded up to the save value. */
+ * obd_end got rounded up to the same value.
+ */
if (start_side != 0 && end_side != 0 && *obd_start == *obd_end)
return 0;
* in the wrong direction and touch it up.
* interestingly, this can't underflow since end must be > start
* if we passed through the previous check.
- * (should we assert for that somewhere?) */
+ * (should we assert for that somewhere?)
+ */
if (end_side != 0)
(*obd_end)--;
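/* Editorial sketch, not part of this patch: a hypothetical caller (mirroring
 * the FIEMAP code earlier in this file) that clips a file extent to the part
 * backed by one stripe; obd_start/obd_end are only meaningful when an
 * intersection is reported:
 */
static void lov_stripe_clip_example(struct lov_stripe_md *lsm, int stripeno,
				    u64 start, u64 end)
{
	u64 obd_start, obd_end;

	if (lov_stripe_intersects(lsm, stripeno, start, end,
				  &obd_start, &obd_end) == 0)
		return;		/* this stripe backs none of the extent */

	CDEBUG(D_INFO, "stripe %d covers [%llu, %llu]\n",
	       stripeno, obd_start, obd_end);
}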
if (lsm) {
/* If we are just sizing the EA, limit the stripe count
- * to the actual number of OSTs in this filesystem. */
+ * to the actual number of OSTs in this filesystem.
+ */
if (!lmmp) {
stripe_count = lov_get_stripecnt(lov, lmm_magic,
lsm->lsm_stripe_count);
/* No need to allocate more than maximum supported stripes.
* Anyway, this is pretty inaccurate since ld_tgt_count now
* represents max index and we should rely on the actual number
- * of OSTs instead */
+ * of OSTs instead
+ */
stripe_count = lov_mds_md_max_stripe_count(
lov->lov_ocd.ocd_max_easize, lmm_magic);
stripe_count = 1;
/* stripe count is based on whether ldiskfs can handle
- * larger EA sizes */
+ * larger EA sizes
+ */
if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
lov->lov_ocd.ocd_max_easize)
max_stripes = lov_mds_md_max_stripe_count(
set_fs(KERNEL_DS);
/* we only need the header part from user space to get lmm_magic and
- * lmm_stripe_count, (the header part is common to v1 and v3) */
+ * lmm_stripe_count, (the header part is common to v1 and v3)
+ */
lum_size = sizeof(struct lov_user_md_v1);
if (copy_from_user(&lum, lump, lum_size)) {
rc = -EFAULT;
if ((pool_tgt_count(pool) == 0) ||
(*pos >= pool_tgt_count(pool))) {
/* iter is not created, so stop() has no way to
- * find pool to dec ref */
+ * find pool to dec ref
+ */
lov_pool_putref(pool);
return NULL;
}
iter->idx = 0;
/* we use seq_file private field to memorized iterator so
- * we can free it at stop() */
+ * we can free it at stop()
+ */
/* /!\ do not forget to restore it to pool before freeing it */
s->private = iter;
if (*pos > 0) {
/* in some cases stop() method is called 2 times, without
* calling start() method (see seq_read() from fs/seq_file.c)
- * we have to free only if s->private is an iterator */
+ * we have to free only if s->private is an iterator
+ */
if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
/* we restore s->private so next call to pool_proc_start()
- * will work */
+ * will work
+ */
s->private = iter->pool;
lov_pool_putref(iter->pool);
kfree(iter);
if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
(set->set_oi->oi_md->lsm_stripe_count != attrset)) {
/* When we take attributes of some epoch, we require all the
- * ost to be active. */
+ * ost to be active.
+ */
CERROR("Not all the stripes had valid attrs\n");
rc = -EIO;
goto out;
}
/* The callback for osc_getattr_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
static int cb_getattr_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
}
/* The callback for osc_setattr_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
static int cb_setattr_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
}
/* The callback for osc_statfs_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
static int cb_statfs_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
lov_sfs = oinfo->oi_osfs;
success = atomic_read(&set->set_success);
/* XXX: the same is done in lov_update_common_set, however
- lovset->set_exp is not initialized. */
+ * lovset->set_exp is not initialized.
+ */
lov_update_set(set, lovreq, rc);
if (rc)
goto out;
}
/* skip targets that have been explicitly disabled by the
- * administrator */
+ * administrator
+ */
if (!lov->lov_tgts[i]->ltd_exp) {
CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
continue;
{
pgoff_t size; /* stripe size in pages */
pgoff_t skip; /* how many pages in every stripe are occupied by
- * "other" stripes */
+ * "other" stripes
+ */
pgoff_t start;
pgoff_t end;
switch (parent->cll_state) {
case CLS_ENQUEUED:
/* See LU-1355 for the case that a glimpse lock is
- * interrupted by signal */
+ * interrupted by signal
+ */
LASSERT(parent->cll_flags & CLF_CANCELLED);
break;
case CLS_QUEUING: