diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3391a5cd0c583773b77c44ab58ca236e0659f237..1cafae743885236e74d2b3b7857a5244b972ecf4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -613,6 +613,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
         */
        smp_wmb();
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+       /*
+        * The following mb guarantees that previous clear of a PENDING bit
+        * will not be reordered with any speculative LOADS or STORES from
+        * work->current_func, which is executed afterwards.  This possible
+        * reordering can lead to a missed execution on an attempt to queue
+        * the same @work.  E.g. consider this case:
+        *
+        *   CPU#0                         CPU#1
+        *   ----------------------------  --------------------------------
+        *
+        * 1  STORE event_indicated
+        * 2  queue_work_on() {
+        * 3    test_and_set_bit(PENDING)
+        * 4 }                             set_..._and_clear_pending() {
+        * 5                                 set_work_data() # clear bit
+        * 6                                 smp_mb()
+        * 7                               work->current_func() {
+        * 8                                  LOAD event_indicated
+        *                                 }
+        *
+        * Without an explicit full barrier, the speculative LOAD on line 8
+        * can be executed before CPU#0 performs the STORE on line 1.  If
+        * that happens, CPU#0 observes that the PENDING bit is still set
+        * and does not queue a new execution of @work, in the hope that
+        * CPU#1 will eventually finish the queued @work.  Meanwhile CPU#1
+        * does not see that event_indicated is set, because the
+        * speculative LOAD was executed before the actual STORE.
+        */
+       smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
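
For reference, a minimal sketch of the caller-side pattern the new barrier protects, assuming a hypothetical driver flag (my_event_indicated), work item (my_work) and handler (my_work_fn); none of these names are part of this patch:

#include <linux/printk.h>
#include <linux/workqueue.h>

static bool my_event_indicated;

static void my_work_fn(struct work_struct *work)
{
	/* "LOAD event_indicated" (line 8 in the comment above): without the
	 * smp_mb() added by this patch, this load could be satisfied before
	 * the STORE in signal_event() becomes visible, and the event would
	 * be silently dropped. */
	if (my_event_indicated)
		pr_info("event handled\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static void signal_event(void)
{
	my_event_indicated = true;		/* line 1: STORE event_indicated */
	queue_work(system_wq, &my_work);	/* lines 2-4: may find PENDING already set */
}
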
@@ -1466,13 +1495,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        timer_stats_timer_set_start_info(&dwork->timer);
 
        dwork->wq = wq;
-       /* timer isn't guaranteed to run in this cpu, record earlier */
-       if (cpu == WORK_CPU_UNBOUND)
-               cpu = raw_smp_processor_id();
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
-       add_timer_on(timer, cpu);
+       if (unlikely(cpu != WORK_CPU_UNBOUND))
+               add_timer_on(timer, cpu);
+       else
+               add_timer(timer);
 }
 
 /**
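
A hedged usage sketch of the behaviour this hunk changes: a delayed work item queued for WORK_CPU_UNBOUND now arms its timer with add_timer() and may therefore start on any CPU, while an explicit CPU still goes through add_timer_on().  The names my_dwork_fn, my_unbound_dwork and my_pinned_dwork are illustrative only:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_dwork_fn(struct work_struct *work)
{
	pr_info("delayed work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_DELAYED_WORK(my_unbound_dwork, my_dwork_fn);
static DECLARE_DELAYED_WORK(my_pinned_dwork, my_dwork_fn);

static void my_schedule_examples(void)
{
	/* WORK_CPU_UNBOUND: timer armed with add_timer(), so the handler
	 * runs on whichever CPU the timer happens to fire on. */
	queue_delayed_work(system_unbound_wq, &my_unbound_dwork, HZ);

	/* Explicit CPU: still pinned to CPU 1 via add_timer_on(). */
	queue_delayed_work_on(1, system_wq, &my_pinned_dwork, HZ);
}
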
@@ -3405,7 +3434,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
         * attributes breaks ordering guarantee.  Disallow exposing ordered
         * workqueues.
         */
-       if (WARN_ON(wq->flags & __WQ_ORDERED))
+       if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                return -EINVAL;
 
        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
@@ -3970,8 +3999,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
                return -EINVAL;
 
        /* creating multiple pwqs breaks ordering guarantee */
-       if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-               return -EINVAL;
+       if (!list_empty(&wq->pwqs)) {
+               if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+                       return -EINVAL;
+
+               wq->flags &= ~__WQ_ORDERED;
+       }
 
        pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
        new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
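
To illustrate the distinction this hunk introduces, here is a hedged sketch (not part of the patch; the function and workqueue names are made up, and error unwinding is trimmed) of how apply_workqueue_attrs() now treats the two flavours of ordered workqueues:

#include <linux/gfp.h>
#include <linux/workqueue.h>

static int my_attrs_example(void)
{
	struct workqueue_struct *wq_impl, *wq_expl;
	struct workqueue_attrs *attrs;

	wq_impl = alloc_workqueue("impl", WQ_UNBOUND, 1);	/* implicitly ordered */
	wq_expl = alloc_ordered_workqueue("expl", 0);		/* explicitly ordered */
	if (!wq_impl || !wq_expl)
		return -ENOMEM;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	attrs->nice = -5;

	/* Succeeds, but clears __WQ_ORDERED: wq_impl no longer guarantees
	 * ordered execution once its attributes have been changed. */
	apply_workqueue_attrs(wq_impl, attrs);

	/* Still rejected with -EINVAL (and a WARN): explicit ordering is
	 * never silently dropped. */
	apply_workqueue_attrs(wq_expl, attrs);

	free_workqueue_attrs(attrs);
	return 0;
}
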
@@ -4219,6 +4252,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        struct workqueue_struct *wq;
        struct pool_workqueue *pwq;
 
+       /*
+        * Unbound && max_active == 1 used to imply ordered, which is no
+        * longer the case on NUMA machines due to per-node pools.  While
+        * alloc_ordered_workqueue() is the right way to create an ordered
+        * workqueue, keep the previous behavior to avoid subtle breakages
+        * on NUMA.
+        */
+       if ((flags & WQ_UNBOUND) && max_active == 1)
+               flags |= __WQ_ORDERED;
+
        /* allocate wq and format name */
        if (flags & WQ_UNBOUND)
                tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
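
In other words, assuming the companion header change makes alloc_ordered_workqueue() pass __WQ_ORDERED_EXPLICIT (as the checks above imply), the two allocation idioms end up with different flags; a short hedged summary:

	/* Legacy idiom: gains __WQ_ORDERED implicitly here, but not
	 * __WQ_ORDERED_EXPLICIT, so a later attrs or max_active change may
	 * legitimately drop the ordering. */
	wq = alloc_workqueue("legacy", WQ_UNBOUND, 1);

	/* Preferred idiom: sets both flags, and the ordering can no longer
	 * be removed behind the caller's back. */
	wq = alloc_ordered_workqueue("ordered", 0);
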
@@ -4407,13 +4450,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
        struct pool_workqueue *pwq;
 
        /* disallow meddling with max_active for ordered workqueues */
-       if (WARN_ON(wq->flags & __WQ_ORDERED))
+       if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                return;
 
        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
        mutex_lock(&wq->mutex);
 
+       wq->flags &= ~__WQ_ORDERED;
        wq->saved_max_active = max_active;
 
        for_each_pwq(pwq, wq)
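
The net effect, sketched under the same assumptions as above (wq and ordered_wq are illustrative names): raising max_active on an implicitly ordered workqueue is now permitted and simply drops the implicit ordering, while an explicitly ordered workqueue still refuses with a WARN:

	/* wq was created with alloc_workqueue("poller", WQ_UNBOUND, 1):
	 * allowed, __WQ_ORDERED is cleared and up to 4 items may now run
	 * concurrently. */
	workqueue_set_max_active(wq, 4);

	/* ordered_wq came from alloc_ordered_workqueue(): WARNs and returns
	 * early, ordering is preserved. */
	workqueue_set_max_active(ordered_wq, 4);
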