do_release_stripe(conf, sh);
}
+/* the caller must already hold conf->device_lock */
+static int release_stripe_list(struct r5conf *conf)
+{
+ struct stripe_head *sh;
+ int count = 0;
+ struct llist_node *head;
+
+ head = llist_del_all(&conf->released_stripes);
+ while (head) {
+ sh = llist_entry(head, struct stripe_head, release_list);
+ head = llist_next(head);
+ /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
+ smp_mb();
+ clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
+ /*
+ * Don't worry if the bit gets set again here: in that case
+ * the count is always > 1, so this release cannot be the
+ * final one. The same is true for the STRIPE_ON_UNPLUG_LIST
+ * bit.
+ */
+ __release_stripe(conf, sh);
+ count++;
+ }
+
+ return count;
+}
+
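A side note for reviewers less familiar with <linux/llist.h>: the detach-then-walk pattern used by release_stripe_list() above can be sketched with plain C11 atomics. This is an illustrative userspace analogue, not kernel code: push() stands in for llist_add() (returning true when the list was empty, i.e. the consumer should be woken) and pop_all() for llist_del_all().

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct lockfree_list {
	_Atomic(struct node *) first;
};

/* Push one node onto the list; cf. llist_add(). Returns true when the
 * list was previously empty, which is the "wake the consumer" signal. */
static bool push(struct lockfree_list *l, struct node *n)
{
	struct node *old = atomic_load(&l->first);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&l->first, &old, n));
	return old == NULL;
}

/* Atomically detach the whole list; cf. llist_del_all(). The caller
 * then walks the detached chain without any further atomics. */
static struct node *pop_all(struct lockfree_list *l)
{
	return atomic_exchange(&l->first, NULL);
}

The llist_entry() call in the patch is just container_of(): it maps the embedded llist_node back to its enclosing stripe_head.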
static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
+ bool wakeup;
+
+ if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ goto slow_path;
+ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+ if (wakeup)
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+slow_path:
local_irq_save(flags);
+ /* we are fine here whether STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
do_release_stripe(conf, sh);
spin_unlock(&conf->device_lock);
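The shape of the new release_stripe() fast/slow split, continuing the sketch above. All names below are hypothetical stand-ins: wake_consumer() for md_wakeup_thread(), object_put_locked() for the original device_lock path.

struct object {
	atomic_flag queued;	/* plays the role of STRIPE_ON_RELEASE_LIST;
				 * init with ATOMIC_FLAG_INIT */
	struct node llnode;	/* plays the role of sh->release_list */
};

void wake_consumer(void);			/* cf. md_wakeup_thread() */
void object_put_locked(struct object *o);	/* cf. the old locked put */

void object_put(struct lockfree_list *released, struct object *o)
{
	/* Fast path: the first releaser queues the object, no lock held. */
	if (!atomic_flag_test_and_set(&o->queued)) {
		if (push(released, &o->llnode))
			wake_consumer();	/* list was empty: kick the daemon */
		return;
	}
	/* Already queued: the list entry keeps the count above zero, so
	 * this reference drop can safely take the old locked path. */
	object_put_locked(o);
}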
if (atomic_read(&sh->count)) {
BUG_ON(!list_empty(&sh->lru)
&& !test_bit(STRIPE_EXPANDING, &sh->state)
- && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
+ && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
+ && !test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
*/
smp_mb__before_clear_bit();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+ /*
+ * STRIPE_ON_RELEASE_LIST could be set at this point; in
+ * that case the count is always > 1.
+ */
__release_stripe(conf, sh);
cnt++;
}
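The consumer side of the same sketch shows why the barrier-then-clear ordering in release_stripe_list() (and the analogous STRIPE_ON_UNPLUG_LIST handling above) is safe: the next pointer is read before the flag is cleared, so a concurrent re-queue after the clear cannot disturb the walk. Again illustrative only, reusing the types and helpers from the sketches above:

static int drain(struct lockfree_list *l)
{
	struct node *n = pop_all(l);
	int count = 0;

	while (n) {
		struct object *o = (struct object *)
			((char *)n - offsetof(struct object, llnode));

		n = n->next;	/* read the link before clearing the flag */
		atomic_thread_fence(memory_order_seq_cst);	/* cf. smp_mb() */
		atomic_flag_clear(&o->queued);	/* o may be re-queued from here on */
		/* If o was already re-queued, its count is > 1, so this
		 * put cannot be the final one. */
		object_put_locked(o);
		count++;
	}
	return count;
}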
spin_lock_irq(&conf->device_lock);
while (1) {
struct bio *bio;
- int batch_size;
+ int batch_size, released;
+
+ released = release_stripe_list(conf);
if (
!list_empty(&conf->bitmap_list)) {
}
batch_size = handle_active_stripes(conf);
- if (!batch_size)
+ if (!batch_size && !released)
break;
handled += batch_size;
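The raid5d change above reduces to a loop-termination rule: go back to sleep only when both the lock-free list and the batch handler were idle in the same pass, otherwise a stripe queued while a batch was being handled could be missed. A sketch, where handle_batch() is a hypothetical stand-in for handle_active_stripes():

int handle_batch(void);	/* hypothetical stand-in for handle_active_stripes() */

void daemon_loop(struct lockfree_list *released_list)
{
	for (;;) {
		int released = drain(released_list);
		int batch = handle_batch();

		if (!batch && !released)
			break;	/* both idle in one pass: safe to sleep */
	}
}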
INIT_LIST_HEAD(&conf->delayed_list);
INIT_LIST_HEAD(&conf->bitmap_list);
INIT_LIST_HEAD(&conf->inactive_list);
+ init_llist_head(&conf->released_stripes);
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
atomic_set(&conf->active_aligned_reads, 0);
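For completeness, the init_llist_head() call added to setup_conf() has a one-line analogue in the sketch: an empty lock-free list is just a NULL head pointer.

static void list_init(struct lockfree_list *l)
{
	atomic_init(&l->first, NULL);	/* cf. init_llist_head() */
}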
struct stripe_head {
struct hlist_node hash;
struct list_head lru; /* inactive_list or handle_list */
+ struct llist_node release_list;
struct r5conf *raid_conf;
short generation; /* increments with every
* reshape */
STRIPE_OPS_REQ_PENDING,
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
+ STRIPE_ON_RELEASE_LIST,
};
/*
*/
atomic_t active_stripes;
struct list_head inactive_list;
+ struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
int inactive_blocked; /* release of inactive stripes blocked,