* and per-family private info->pointers.
* But we need to stay compatible with older kernels.
* If it returns successfully, adm_ctx members are valid.
+ *
+ * At this point, we still rely on the global genl_lock().
+ * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
+ * to add additional synchronization against object destruction/modification.
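+ * For now, configuration requests within one resource are additionally
+ * serialized by that resource's adm_mutex.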
*/
#define DRBD_ADM_NEED_MINOR 1
#define DRBD_ADM_NEED_RESOURCE 2
if (err)
goto fail;
- /* and assign stuff to the global adm_ctx */
+ /* and assign stuff to the adm_ctx */
nla = nested_attr_tb[__nla_type(T_ctx_volume)];
if (nla)
adm_ctx->volume = nla_get_u32(nla);
adm_ctx->minor = d_in->minor;
adm_ctx->device = minor_to_device(d_in->minor);
+
+ /* We are protected by the global genl_lock().
+ * But we may explicitly drop it/retake it in drbd_adm_set_role(),
+ * so make sure this object stays around. */
+ if (adm_ctx->device)
+ kref_get(&adm_ctx->device->kref);
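+ /* The matching kref_put() happens in drbd_adm_finish(). */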
+
if (adm_ctx->resource_name) {
adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
}
return ERR_INVALID_REQUEST;
}
+ /* still, always provide adm_ctx->resource, if possible. */
+ if (!adm_ctx->resource) {
+ adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
+ : adm_ctx->connection ? adm_ctx->connection->resource : NULL;
+ if (adm_ctx->resource)
+ kref_get(&adm_ctx->resource->kref);
+ }
+
return NO_ERROR;
fail:
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
struct genl_info *info, int retcode)
{
+ if (adm_ctx->device) {
+ kref_put(&adm_ctx->device->kref, drbd_destroy_device);
+ adm_ctx->device = NULL;
+ }
if (adm_ctx->connection) {
kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
adm_ctx->connection = NULL;
put_ldev(device);
}
} else {
- mutex_lock(&device->resource->conf_update);
+ /* Called from drbd_adm_set_role only.
+ * We are still holding the conf_update mutex. */
nc = first_peer_device(device)->connection->net_conf;
if (nc)
nc->discard_my_data = 0; /* without copy; single bit op is atomic */
- mutex_unlock(&device->resource->conf_update);
set_disk_ro(device->vdisk, false);
if (get_ldev(device)) {
goto out;
}
}
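+ /* drbd_adm_prepare() took an extra reference on the device, so it is
+ * safe to drop the global genl_lock() while changing the role;
+ * configuration requests for this resource stay serialized by its
+ * adm_mutex. */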
+ genl_unlock();
+ mutex_lock(&adm_ctx.resource->adm_mutex);
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
else
retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
+
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
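+ /* Re-acquire the global lock before finishing: without
+ * genl_family.parallel_ops, the genl doit handler is entered with
+ * genl_lock() held and must return with it held. */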
+ genl_lock();
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
device = adm_ctx.device;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
/* we also need a disk
* to change the options on */
success:
put_ldev(device);
out:
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ finish:
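+ /* "finish" is reached directly when drbd_adm_prepare() failed,
+ * i.e. before adm_mutex was taken */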
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
goto finish;
device = adm_ctx.device;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
conn_reconfig_start(first_peer_device(device)->connection);
/* if you want to reconfigure, please tear down first */
kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(device);
conn_reconfig_done(first_peer_device(device)->connection);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
kfree(new_disk_conf);
lc_destroy(resync_lru);
kfree(new_plan);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = adm_detach(adm_ctx.device, parms.force_detach);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
connection = adm_ctx.connection;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_net_conf) {
done:
conn_reconfig_done(connection);
out:
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
connection = first_connection(adm_ctx.resource);
conn_reconfig_start(connection);
retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(connection);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
kfree(new_net_conf);
conn_reconfig_done(connection);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
rv = conn_try_disconnect(connection, parms.force_disconnect);
if (rv < SS_SUCCESS)
retcode = rv; /* FIXME: Type mismatch. */
else
retcode = NO_ERROR;
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
fail:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto fail;
+ goto finish;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
if (!get_ldev(device)) {
retcode = ERR_NO_DISK;
}
fail:
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
+ finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
goto fail;
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
err = set_resource_options(adm_ctx.resource, &res_opts);
if (err) {
retcode = ERR_INVALID_REQUEST;
if (err == -ENOMEM)
retcode = ERR_NOMEM;
}
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
fail:
drbd_adm_finish(&adm_ctx, info, retcode);
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
drbd_resume_io(device);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = drbd_request_state(adm_ctx.device, mask, val);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
/* If there is still bitmap IO pending, probably because of a previous
} else
retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
drbd_resume_io(device);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
retcode = ERR_PAUSE_IS_SET;
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
s = adm_ctx.device->state;
if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
retcode = ERR_PAUSE_IS_CLEAR;
}
}
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
device = adm_ctx.device;
if (test_bit(NEW_CUR_UUID, &device->flags)) {
drbd_uuid_new_current(device);
tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
}
drbd_resume_io(device);
-
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
goto out;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
+
/* w_make_ov_request expects position to be aligned */
device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
device->ov_stop_sector = parms.ov_stop_sector;
wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
drbd_resume_io(device);
+
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
if (!get_ldev(device)) {
put_ldev(device);
out:
mutex_unlock(device->state_mutex);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
goto out;
}
+ /* not yet safe for genl_family.parallel_ops */
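+ /* The resource this request creates, and with it its adm_mutex, does
+ * not exist yet, so we still rely on the global genl_lock() here. */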
if (!conn_create(adm_ctx.resource_name, &res_opts))
retcode = ERR_NOMEM;
out:
goto out;
}
+ mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = drbd_create_device(&adm_ctx, dh->minor);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (retcode != NO_ERROR)
goto out;
+ mutex_lock(&adm_ctx.resource->adm_mutex);
retcode = adm_del_minor(adm_ctx.device);
+ mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
resource = adm_ctx.resource;
+ mutex_lock(&resource->adm_mutex);
/* demote */
for_each_connection(connection, resource) {
struct drbd_peer_device *peer_device;
synchronize_rcu();
drbd_free_resource(resource);
retcode = NO_ERROR;
-
out:
+ mutex_unlock(&resource->adm_mutex);
+finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
- goto out;
+ goto finish;
resource = adm_ctx.resource;
+ mutex_lock(&resource->adm_mutex);
for_each_connection(connection, resource) {
if (connection->cstate > C_STANDALONE) {
retcode = ERR_NET_CONFIGURED;
drbd_free_resource(resource);
retcode = NO_ERROR;
out:
+ mutex_unlock(&resource->adm_mutex);
+finish:
drbd_adm_finish(&adm_ctx, info, retcode);
return 0;
}