 static int cgroup_call_pre_destroy(struct cgroup *cgrp)
 {
         struct cgroup_subsys *ss;
-        struct cgroup_event *event, *tmp;
         int ret = 0;

         for_each_subsys(cgrp->root, ss)
                 if (ss->pre_destroy) {
                         ret = ss->pre_destroy(ss, cgrp);
                         if (ret)
-                                goto out;
+                                break;
                 }

-        /*
-         * Unregister events and notify userspace.
-         */
-        spin_lock(&cgrp->event_list_lock);
-        list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
-                list_del(&event->list);
-                eventfd_signal(event->eventfd, 1);
-                schedule_work(&event->remove);
-        }
-        spin_unlock(&cgrp->event_list_lock);
-
-out:
         return ret;
 }
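
For reference, this is what cgroup_call_pre_destroy() looks like with the hunk applied (a straightforward reconstruction from the diff above, assuming no other hunks touch this function):

        static int cgroup_call_pre_destroy(struct cgroup *cgrp)
        {
                struct cgroup_subsys *ss;
                int ret = 0;

                for_each_subsys(cgrp->root, ss)
                        if (ss->pre_destroy) {
                                ret = ss->pre_destroy(ss, cgrp);
                                if (ret)
                                        break;
                        }

                return ret;
        }

A failing pre_destroy now simply stops the subsystem walk and propagates the error; event teardown no longer happens here, so a failed (and possibly retried) rmdir can no longer signal eventfds for a cgroup that still exists.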
@@ ... @@ static void cgroup_event_remove(struct work_struct *work)
         event->cft->unregister_event(cgrp, event->cft, event->eventfd);
         eventfd_ctx_put(event->eventfd);
-        remove_wait_queue(event->wqh, &event->wait);
         kfree(event);
 }
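
With the remove_wait_queue() call gone, the deferred work only releases the event's resources; detaching from the wait queue is now done by whoever schedules the work (the POLLHUP path below, or the rmdir loop at the end of this patch). A sketch of the resulting work function, with the prologue reconstructed from context (it may differ slightly from the target tree):

        static void cgroup_event_remove(struct work_struct *work)
        {
                struct cgroup_event *event = container_of(work,
                                struct cgroup_event, remove);
                struct cgroup *cgrp = event->cgrp;

                event->cft->unregister_event(cgrp, event->cft, event->eventfd);
                eventfd_ctx_put(event->eventfd);
                kfree(event);
        }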
@@ ... @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
         unsigned long flags = (unsigned long)key;

         if (flags & POLLHUP) {
+                __remove_wait_queue(event->wqh, &event->wait);
                 spin_lock(&cgrp->event_list_lock);
                 list_del(&event->list);
                 spin_unlock(&cgrp->event_list_lock);
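
The wakeup callback is invoked with the wait-queue lock already held by the waker, which is why the unlocked __remove_wait_queue() variant must be used here, and why the sleeping cleanup is pushed off to a workqueue. A sketch of the whole callback with this hunk applied (the lines outside the hunk are reconstructed from context):

        static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
                        int sync, void *key)
        {
                struct cgroup_event *event = container_of(wait,
                                struct cgroup_event, wait);
                struct cgroup *cgrp = event->cgrp;
                unsigned long flags = (unsigned long)key;

                if (flags & POLLHUP) {
                        /* the waker holds the wait-queue lock: __ variant */
                        __remove_wait_queue(event->wqh, &event->wait);
                        spin_lock(&cgrp->event_list_lock);
                        list_del(&event->list);
                        spin_unlock(&cgrp->event_list_lock);
                        /* cgroup_event_remove() may sleep; defer to a workqueue */
                        schedule_work(&event->remove);
                }

                return 0;
        }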
@@ ... @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
         struct dentry *d;
         struct cgroup *parent;
         DEFINE_WAIT(wait);
+        struct cgroup_event *event, *tmp;
         int ret;

         /* the vfs holds both inode->i_mutex already */
@@ ... @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
         set_bit(CGRP_RELEASABLE, &parent->flags);
         check_for_release(parent);
+        /*
+         * Unregister events and notify userspace.
+         * Notify userspace about cgroup removal only after rmdir of the
+         * cgroup directory, to avoid a race between userspace and
+         * kernelspace.
+         */
+        spin_lock(&cgrp->event_list_lock);
+        list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
+                list_del(&event->list);
+                remove_wait_queue(event->wqh, &event->wait);
+                eventfd_signal(event->eventfd, 1);
+                schedule_work(&event->remove);
+        }
+        spin_unlock(&cgrp->event_list_lock);
+
         mutex_unlock(&cgroup_mutex);
         return 0;
 }
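
The user-visible effect: an eventfd registered through cgroup.event_control is now signalled for cgroup removal only after rmdir of the cgroup directory has completed, so a reader of the eventfd can rely on the group actually being gone. A minimal userspace sketch of the registration protocol ("<event_fd> <control_fd> [args]" written to cgroup.event_control); the mount point, group name and threshold below are assumptions for illustration, and error handling is omitted:

        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/eventfd.h>
        #include <unistd.h>

        int main(void)
        {
                /* hypothetical memcg mount point and group */
                const char *grp = "/sys/fs/cgroup/memory/mygroup";
                char path[256], cmd[64];
                int efd, cfd, ecfd;
                uint64_t count;

                efd = eventfd(0, 0);            /* fd to be signalled */

                snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", grp);
                cfd = open(path, O_RDONLY);     /* control file to watch */

                snprintf(path, sizeof(path), "%s/cgroup.event_control", grp);
                ecfd = open(path, O_WRONLY);

                /* register a usage threshold of 4 MiB on the eventfd */
                snprintf(cmd, sizeof(cmd), "%d %d 4194304", efd, cfd);
                write(ecfd, cmd, strlen(cmd));

                /*
                 * Blocks until the threshold is crossed or the group is
                 * removed.  With this patch, the removal notification
                 * arrives only after rmdir of the cgroup directory has
                 * finished.
                 */
                read(efd, &count, sizeof(count));
                printf("event fired, count=%llu\n", (unsigned long long)count);
                return 0;
        }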