 	dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
 }
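+/*
+ * Metadata updates are serialized cluster-wide: metadata_update_start()
+ * takes the token lock via lock_comm(), the caller writes the new
+ * superblock, and metadata_update_finish() broadcasts METADATA_UPDATED
+ * (so the other nodes can re-read the metadata) before releasing the
+ * lock. metadata_update_cancel() backs out without sending a message.
+ */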
+static int metadata_update_start(struct mddev *mddev)
+{
+	return lock_comm(mddev->cluster_info);
+}
+
+static int metadata_update_finish(struct mddev *mddev)
+{
+	struct md_cluster_info *cinfo = mddev->cluster_info;
+	struct cluster_msg cmsg;
+	int ret;
+
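+	/* zero the message so unused fields are not sent as stale stack data */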
+	memset(&cmsg, 0, sizeof(cmsg));
+	cmsg.type = cpu_to_le32(METADATA_UPDATED);
+	ret = __sendmsg(cinfo, &cmsg);
+	unlock_comm(cinfo);
+	return ret;
+}
+
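+/*
+ * Back out of a metadata update without notifying peers: drop the token
+ * lock taken in metadata_update_start(). This mirrors unlock_comm(),
+ * assuming lock_comm() takes only token_lockres.
+ */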
+static int metadata_update_cancel(struct mddev *mddev)
+{
+	struct md_cluster_info *cinfo = mddev->cluster_info;
+
+	return dlm_unlock_sync(cinfo->token_lockres);
+}
+
 static struct md_cluster_operations cluster_ops = {
 	.join = join,
 	.leave = leave,
 	.slot_number = slot_number,
 	.resync_info_update = resync_info_update,
+	.metadata_update_start = metadata_update_start,
+	.metadata_update_finish = metadata_update_finish,
+	.metadata_update_cancel = metadata_update_cancel,
 };
static int __init cluster_init(void)
 			err = -EBUSY;
 		else {
 			struct mddev *mddev = rdev->mddev;
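+			/* serialize the superblock update across cluster nodes */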
+			if (mddev_is_clustered(mddev))
+				md_cluster_ops->metadata_update_start(mddev);
 			kick_rdev_from_array(rdev);
 			if (mddev->pers)
 				md_update_sb(mddev, 1);
 			md_new_event(mddev);
+			if (mddev_is_clustered(mddev))
+				md_cluster_ops->metadata_update_finish(mddev);
 			err = 0;
 		}
 	} else if (cmd_match(buf, "writemostly")) {
 	if (err)
 		return err;
 	if (mddev->pers) {
+		if (mddev_is_clustered(mddev))
+			md_cluster_ops->metadata_update_start(mddev);
 		err = update_size(mddev, sectors);
 		md_update_sb(mddev, 1);
+		if (mddev_is_clustered(mddev))
+			md_cluster_ops->metadata_update_finish(mddev);
 	} else {
 		if (mddev->dev_sectors == 0 ||
 		    mddev->dev_sectors > sectors)
static void __md_stop_writes(struct mddev *mddev)
{
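+	/* hold the token across the clean-shutdown superblock write below */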
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_start(mddev);
 	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 	flush_workqueue(md_misc_wq);
 	if (mddev->sync_thread) {
 		mddev->in_sync = 1;
 		md_update_sb(mddev, 1);
 	}
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_finish(mddev);
}
void md_stop_writes(struct mddev *mddev)
 	if (!rdev)
 		return -ENXIO;
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_start(mddev);
+
 	clear_bit(Blocked, &rdev->flags);
 	remove_and_add_spares(mddev, rdev);
 	md_update_sb(mddev, 1);
 	md_new_event(mddev);
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_finish(mddev);
+
 	return 0;
busy:
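+	/* nothing was written; just give the token back */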
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_cancel(mddev);
 	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
 		bdevname(rdev->bdev,b), mdname(mddev));
 	return -EBUSY;
 		err = -EINVAL;
 		goto abort_export;
 	}
+
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_start(mddev);
 	clear_bit(In_sync, &rdev->flags);
 	rdev->desc_nr = -1;
 	rdev->saved_raid_disk = -1;
 	err = bind_rdev_to_array(rdev, mddev);
 	if (err)
-		goto abort_export;
+		goto abort_clustered;
 	/*
 	 * The rest should better be atomic, we can have disk failures
 	md_update_sb(mddev, 1);
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_finish(mddev);
 	/*
 	 * Kick recovery, maybe this spare has to be added to the
 	 * array immediately.
 	md_new_event(mddev);
 	return 0;
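+/* bind_rdev_to_array() failed: give the token back before exporting */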
+abort_clustered:
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_cancel(mddev);
 abort_export:
 	export_rdev(rdev);
 	return err;
 			return rv;
 		}
 	}
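+	/* every failure past this point must exit via err: to drop the token */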
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_start(mddev);
 	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
 		rv = update_size(mddev, (sector_t)info->size * 2);
 		rv = update_raid_disks(mddev, info->raid_disks);
 	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
-		if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
-			return -EINVAL;
-		if (mddev->recovery || mddev->sync_thread)
-			return -EBUSY;
+		if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
+			rv = -EINVAL;
+			goto err;
+		}
+		if (mddev->recovery || mddev->sync_thread) {
+			rv = -EBUSY;
+			goto err;
+		}
 		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
 			struct bitmap *bitmap;
 			/* add the bitmap */
-			if (mddev->bitmap)
-				return -EEXIST;
-			if (mddev->bitmap_info.default_offset == 0)
-				return -EINVAL;
+			if (mddev->bitmap) {
+				rv = -EEXIST;
+				goto err;
+			}
+			if (mddev->bitmap_info.default_offset == 0) {
+				rv = -EINVAL;
+				goto err;
+			}
 			mddev->bitmap_info.offset =
 				mddev->bitmap_info.default_offset;
 			mddev->bitmap_info.space =
 			mddev->pers->quiesce(mddev, 0);
 		} else {
 			/* remove the bitmap */
-			if (!mddev->bitmap)
-				return -ENOENT;
-			if (mddev->bitmap->storage.file)
-				return -EINVAL;
+			if (!mddev->bitmap) {
+				rv = -ENOENT;
+				goto err;
+			}
+			if (mddev->bitmap->storage.file) {
+				rv = -EINVAL;
+				goto err;
+			}
 			mddev->pers->quiesce(mddev, 1);
 			bitmap_destroy(mddev);
 			mddev->pers->quiesce(mddev, 0);
 		}
 	}
 	md_update_sb(mddev, 1);
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_finish(mddev);
+	return rv;
+err:
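+	/* failed before writing the superblock: only undo the token lock */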
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_cancel(mddev);
 	return rv;
 }
 		    mddev->safemode == 0)
 			mddev->safemode = 1;
 		spin_unlock(&mddev->lock);
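+		/* superblock write happens outside the lock, under the token */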
+		if (mddev_is_clustered(mddev))
+			md_cluster_ops->metadata_update_start(mddev);
 		md_update_sb(mddev, 0);
+		if (mddev_is_clustered(mddev))
+			md_cluster_ops->metadata_update_finish(mddev);
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	} else
 		spin_unlock(&mddev->lock);
 				sysfs_notify_dirent_safe(mddev->sysfs_state);
 		}
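+		/* flush any pending superblock changes under the token */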
-		if (mddev->flags & MD_UPDATE_SB_FLAGS)
+		if (mddev->flags & MD_UPDATE_SB_FLAGS) {
+			if (mddev_is_clustered(mddev))
+				md_cluster_ops->metadata_update_start(mddev);
 			md_update_sb(mddev, 0);
+			if (mddev_is_clustered(mddev))
+				md_cluster_ops->metadata_update_finish(mddev);
+		}
 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
 		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
 			set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		}
 	}
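+	/* resync finished: publish the final superblock to all nodes */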
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_start(mddev);
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    mddev->pers->finish_reshape)
 		mddev->pers->finish_reshape(mddev);
 			rdev->saved_raid_disk = -1;
 	md_update_sb(mddev, 1);
+	if (mddev_is_clustered(mddev))
+		md_cluster_ops->metadata_update_finish(mddev);
 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);