epoll: do not insert into poll queues until all sanity checks are done
author    Al Viro <viro@zeniv.linux.org.uk>
          Thu, 10 Sep 2020 02:25:06 +0000 (22:25 -0400)
committer PDO SCM Team <hudsoncm@motorola.com>
          Thu, 30 Dec 2021 08:42:08 +0000 (02:42 -0600)
commit f8d4f44df056c5b504b0d49683fb7279218fd207 upstream.

Mot-CRs-fixed: (CR)
CVE-Fixed: CVE-2021-39634
Bug: 204450605

Change-Id: If2c466f2eecd8acdb152456d94a00210a4c0af74
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gajjala Chakradhar <gajjalac@motorola.com>
Reviewed-on: https://gerrit.mot.com/2152183
SME-Granted: SME Approvals Granted
SLTApproved: Slta Waiver
Tested-by: Jira Key
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key

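For context only (not part of the patch): a minimal userspace sketch of the EPOLL_CTL_ADD call that reaches ep_insert(). The nested case, where one epoll fd is added to another, is the one in which full_check is set and topology checks such as reverse_path_check() apply; this change runs those checks before the item is inserted into the target file's poll queues.

    /*
     * Hypothetical userspace sketch, for illustration only.
     * Adding an epoll fd to another epoll fd goes through
     * epoll_ctl(EPOLL_CTL_ADD) -> ep_insert() in fs/eventpoll.c.
     */
    #include <stdio.h>
    #include <sys/epoll.h>

    int main(void)
    {
            int outer = epoll_create1(0);
            int inner = epoll_create1(0);
            struct epoll_event ev;

            if (outer < 0 || inner < 0) {
                    perror("epoll_create1");
                    return 1;
            }

            ev.events = EPOLLIN;
            ev.data.fd = inner;

            /* Nested add: the inner epoll fd is watched by the outer one. */
            if (epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev) < 0) {
                    /* Fails (e.g. -ELOOP/-EINVAL) if the epoll graph checks reject it. */
                    perror("epoll_ctl");
                    return 1;
            }

            puts("nested epoll registration succeeded");
            return 0;
    }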
fs/eventpoll.c

index a10c6081e48fd953c5a86731c6047c5e8dcb96bf..b4ff1fee79cf6399e001df5a8e4635c06126b675 100644
@@ -1462,6 +1462,22 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
                RCU_INIT_POINTER(epi->ws, NULL);
        }
 
+       /* Add the current item to the list of active epoll hook for this file */
+       spin_lock(&tfile->f_lock);
+       list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
+       spin_unlock(&tfile->f_lock);
+
+       /*
+        * Add the current item to the RB tree. All RB tree operations are
+        * protected by "mtx", and ep_insert() is called with "mtx" held.
+        */
+       ep_rbtree_insert(ep, epi);
+
+       /* now check if we've created too many backpaths */
+       error = -EINVAL;
+       if (full_check && reverse_path_check())
+               goto error_remove_epi;
+
        /* Initialize the poll table using the queue callback */
        epq.epi = epi;
        init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1484,22 +1500,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
        if (epi->nwait < 0)
                goto error_unregister;
 
-       /* Add the current item to the list of active epoll hook for this file */
-       spin_lock(&tfile->f_lock);
-       list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
-       spin_unlock(&tfile->f_lock);
-
-       /*
-        * Add the current item to the RB tree. All RB tree operations are
-        * protected by "mtx", and ep_insert() is called with "mtx" held.
-        */
-       ep_rbtree_insert(ep, epi);
-
-       /* now check if we've created too many backpaths */
-       error = -EINVAL;
-       if (full_check && reverse_path_check())
-               goto error_remove_epi;
-
        /* We have to drop the new item inside our item list to keep track of it */
        spin_lock_irqsave(&ep->lock, flags);
 
@@ -1528,6 +1528,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
        return 0;
 
+error_unregister:
+       ep_unregister_pollwait(ep, epi);
 error_remove_epi:
        spin_lock(&tfile->f_lock);
        list_del_rcu(&epi->fllink);
@@ -1535,9 +1537,6 @@ error_remove_epi:
 
        rb_erase_cached(&epi->rbn, &ep->rbr);
 
-error_unregister:
-       ep_unregister_pollwait(ep, epi);
-
        /*
         * We need to do this because an event could have been arrived on some
         * allocated wait queue. Note that we don't care about the ep->ovflist