Merge commit '8700c95adb03' into timers/nohz
author	Frederic Weisbecker <fweisbec@gmail.com>
Thu, 2 May 2013 15:37:49 +0000 (17:37 +0200)
committer	Frederic Weisbecker <fweisbec@gmail.com>
Thu, 2 May 2013 15:54:19 +0000 (17:54 +0200)
The full dynticks tree needs the latest RCU and scheduler
upstream updates in order to fix some dependencies.

Merge in a common upstream merge point that carries these
updates.

Conflicts:
include/linux/perf_event.h
kernel/rcutree.h
kernel/rcutree_plugin.h

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
17 files changed:
Documentation/RCU/stallwarn.txt
Documentation/kernel-parameters.txt
include/linux/perf_event.h
include/linux/rcupdate.h
include/linux/sched.h
init/Kconfig
init/main.c
kernel/events/core.c
kernel/hrtimer.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/softirq.c
kernel/time/tick-broadcast.c

Simple merge
Simple merge
index 0140830225e216103742f1dc1f6736d461f86d8a,e0373d26c24454a313fa47bb96d5211458db1c2d..f463a46424e240715f2cc0c3bbf58f38955db1a8
@@@ -799,12 -788,12 +788,18 @@@ static inline int __perf_event_disable(
  static inline void perf_event_task_tick(void)                         { }
  #endif
  
 +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
 +extern bool perf_event_can_stop_tick(void);
 +#else
 +static inline bool perf_event_can_stop_tick(void)                     { return true; }
 +#endif
 +
+ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+ extern void perf_restore_debug_store(void);
+ #else
+ static inline void perf_restore_debug_store(void)                     { }
+ #endif
  #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
  
  /*
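Note on the timers/nohz side of this conflict: the new perf_event_can_stop_tick() hook is meant to be consulted before a full-dynticks CPU shuts its tick off. A minimal sketch of such a caller, assuming illustrative helper names (can_stop_full_tick() and sched_can_stop_tick() are not part of this diff):

/* Sketch only: gate full-dynticks tick stopping on the new perf hook. */
static bool can_stop_full_tick(void)
{
	/* The scheduler must not need the tick on this CPU... */
	if (!sched_can_stop_tick())
		return false;

	/* ...and neither must perf (e.g. for event rotation/throttling). */
	if (!perf_event_can_stop_tick())
		return false;

	return true;
}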
Simple merge
Simple merge
diff --cc init/Kconfig
index 8f97a7407714736cf6507c1d6f897f3303f12e6f,4367e1379002d8368fbfe9f9a4101af9c7efcfa4..66f67afad4fad4758d802938d5c3a4fefbc4dbf4
@@@ -580,16 -576,19 +576,19 @@@ config RCU_FANOUT_EXAC
  
  config RCU_FAST_NO_HZ
        bool "Accelerate last non-dyntick-idle CPU's grace periods"
 -      depends on NO_HZ && SMP
 +      depends on NO_HZ_COMMON && SMP
        default n
        help
-         This option causes RCU to attempt to accelerate grace periods in
-         order to allow CPUs to enter dynticks-idle state more quickly.
-         On the other hand, this option increases the overhead of the
-         dynticks-idle checking, thus degrading scheduling latency.
+         This option permits CPUs to enter dynticks-idle state even if
+         they have RCU callbacks queued, and prevents RCU from waking
+         these CPUs up more than roughly once every four jiffies (by
+         default, you can adjust this using the rcutree.rcu_idle_gp_delay
+         parameter), thus improving energy efficiency.  On the other
+         hand, this option increases the duration of RCU grace periods,
+         for example, slowing down synchronize_rcu().
  
-         Say Y if energy efficiency is critically important, and you don't
-               care about real-time response.
+         Say Y if energy efficiency is critically important, and you
+               don't care about increased grace-period durations.
  
          Say N if you are unsure.
  
diff --cc init/main.c
Simple merge
Simple merge
Simple merge
Simple merge
index 38acc49da2c6cf7e0303afdfe731a11460c560a1,14ee40795d6fe09257818c11a1a456a96214cf4c..da77a8f57ff95f80c7546684eba2ac293fdde08c
@@@ -529,16 -526,18 +526,18 @@@ static void print_cpu_stall_info(struc
  static void print_cpu_stall_info_end(void);
  static void zero_cpu_stall_ticks(struct rcu_data *rdp);
  static void increment_cpu_stall_ticks(void);
 -static bool is_nocb_cpu(int cpu);
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp);
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
+ static void rcu_init_one_nocb(struct rcu_node *rnp);
  static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy);
  static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
                                      struct rcu_data *rdp);
- static bool nocb_cpu_expendable(int cpu);
  static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
  static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
- static void init_nocb_callback_list(struct rcu_data *rdp);
- static void __init rcu_init_nocb(void);
 +static void rcu_kick_nohz_cpu(int cpu);
+ static bool init_nocb_callback_list(struct rcu_data *rdp);
  
  #endif /* #ifndef RCU_TREE_NONCORE */
  
index 0cd91cc18db410c866e1405c1a207d803e16b6a1,d084ae3f281c2cad8a44075be8d3ebacadd7929c..71bd7337d0ccf2e49ff15ae3cdd687a2519352f8
@@@ -2166,8 -2010,49 +2011,49 @@@ static int __init parse_rcu_nocb_poll(c
  }
  early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
  
+ /*
+  * Do any no-CBs CPUs need another grace period?
+  *
+  * Interrupts must be disabled.  If the caller does not hold the root
+  * rcu_node structure's ->lock, the results are advisory only.
+  */
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+ {
+       struct rcu_node *rnp = rcu_get_root(rsp);
+       return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
+ }
+ /*
+  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
+  * grace period.
+  */
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+ {
+       wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ }
+ /*
+  * Set the root rcu_node structure's ->need_future_gp field
+  * based on the sum of those of all rcu_node structures.  This does
+  * double-count the root rcu_node structure's requests, but this
+  * is necessary to handle the possibility of a rcu_nocb_kthread()
+  * having awakened during the time that the rcu_node structures
+  * were being updated for the end of the previous grace period.
+  */
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+ {
+       rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
+ }
+ static void rcu_init_one_nocb(struct rcu_node *rnp)
+ {
+       init_waitqueue_head(&rnp->nocb_gp_wq[0]);
+       init_waitqueue_head(&rnp->nocb_gp_wq[1]);
+ }
  /* Is the specified CPU a no-CBs CPU? */
 -static bool is_nocb_cpu(int cpu)
 +bool rcu_is_nocb_cpu(int cpu)
  {
        if (have_rcu_nocb_mask)
                return cpumask_test_cpu(cpu, rcu_nocb_mask);
@@@ -2225,9 -2110,16 +2111,16 @@@ static bool __call_rcu_nocb(struct rcu_
                            bool lazy)
  {
  
 -      if (!is_nocb_cpu(rdp->cpu))
 +      if (!rcu_is_nocb_cpu(rdp->cpu))
                return 0;
        __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+       if (__is_kfree_rcu_offset((unsigned long)rhp->func))
+               trace_rcu_kfree_callback(rdp->rsp->name, rhp,
+                                        (unsigned long)rhp->func,
+                                        rdp->qlen_lazy, rdp->qlen);
+       else
+               trace_rcu_callback(rdp->rsp->name, rhp,
+                                  rdp->qlen_lazy, rdp->qlen);
        return 1;
  }
  
@@@ -2448,22 -2282,35 +2283,30 @@@ static bool init_nocb_callback_list(str
  {
        if (rcu_nocb_mask == NULL ||
            !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
-               return;
+               return false;
        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
+       return true;
+ }
+ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
+ {
+       return 0;
  }
  
- /* Initialize the ->call_remote fields in the rcu_state structures. */
- static void __init rcu_init_nocb(void)
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  {
- #ifdef CONFIG_PREEMPT_RCU
-       rcu_preempt_state.call_remote = call_rcu_preempt_remote;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
-       rcu_bh_state.call_remote = call_rcu_bh_remote;
-       rcu_sched_state.call_remote = call_rcu_sched_remote;
  }
  
- #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
+ {
+ }
+ static void rcu_init_one_nocb(struct rcu_node *rnp)
+ {
+ }
  
 -static bool is_nocb_cpu(int cpu)
 -{
 -      return false;
 -}
 -
  static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
                            bool lazy)
  {
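To see how the pieces added above fit together: rcu_init_one_nocb() sets up two per-rcu_node waitqueues, a no-CBs kthread that needs a grace period registers its request in ->need_future_gp[] and then sleeps on the waitqueue for that grace period, and rcu_nocb_gp_cleanup() wakes it once ->completed advances. A simplified sketch of the waiting side, assuming a helper rcu_start_future_gp() that is not shown in this diff:

/* Sketch only: how a no-CBs kthread might wait for a future grace period. */
static void rcu_nocb_wait_gp_sketch(struct rcu_data *rdp)
{
	unsigned long c;
	struct rcu_node *rnp = rdp->mynode;

	/* Register the need for a future grace period... */
	raw_spin_lock_irq(&rnp->lock);
	c = rcu_start_future_gp(rnp, rdp);	/* number of that grace period */
	raw_spin_unlock_irq(&rnp->lock);

	/* ...then sleep until rcu_nocb_gp_cleanup() reports it complete. */
	wait_event_interruptible(rnp->nocb_gp_wq[c & 0x1],
				 ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c));
}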
Simple merge
Simple merge
index eb363aa5d83cf02a6f2717a79a56e815de4b1c43,4c225c4c7111d7baaa3dfa3d4ec0b3f9a966c593..24dc2989774937be3f1161541d8fb8860aeec4d0
@@@ -5,9 -5,9 +5,10 @@@
  #include <linux/mutex.h>
  #include <linux/spinlock.h>
  #include <linux/stop_machine.h>
 +#include <linux/tick.h>
  
  #include "cpupri.h"
+ #include "cpuacct.h"
  
  extern __read_mostly int scheduler_running;
  
index 8b1446d4a4dbbe3f1e090bebdeb53f9c9c49f192,14d7758074aadf4d1c43947ecef675e8bb6c044e..51a09d56e78b875cc2c329afb59876f52afdaf8b
@@@ -323,33 -323,12 +323,25 @@@ void irq_enter(void
  
  static inline void invoke_softirq(void)
  {
-       if (!force_irqthreads) {
- #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+       if (!force_irqthreads)
                __do_softirq();
- #else
-               do_softirq();
- #endif
-       } else {
-               __local_bh_disable((unsigned long)__builtin_return_address(0),
-                               SOFTIRQ_OFFSET);
+       else
                wakeup_softirqd();
-               __local_bh_enable(SOFTIRQ_OFFSET);
-       }
  }
  
 +static inline void tick_irq_exit(void)
 +{
 +#ifdef CONFIG_NO_HZ_COMMON
 +      int cpu = smp_processor_id();
 +
 +      /* Make sure that timer wheel updates are propagated */
 +      if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
 +              if (!in_interrupt())
 +                      tick_nohz_irq_exit();
 +      }
 +#endif
 +}
 +
  /*
   * Exit an interrupt context. Process softirqs if needed and possible:
   */
@@@ -361,9 -346,12 +359,8 @@@ void irq_exit(void
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
  
 -#ifdef CONFIG_NO_HZ
 -      /* Make sure that timer wheel updates are propagated */
 -      if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
 -              tick_nohz_irq_exit();
 -#endif
 +      tick_irq_exit();
        rcu_irq_exit();
-       sched_preempt_enable_no_resched();
  }
  
  /*
Simple merge