Merge commit 'v2.6.37-rc2' into sched/core
author     Ingo Molnar <mingo@elte.hu>
Thu, 18 Nov 2010 12:22:14 +0000 (13:22 +0100)
committer  Ingo Molnar <mingo@elte.hu>
Thu, 18 Nov 2010 12:22:26 +0000 (13:22 +0100)
Merge reason: Move to a .37-rc base.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/sched.h
kernel/irq/manage.c
kernel/sched.c
kernel/softirq.c
kernel/watchdog.c

diff --combined include/linux/sched.h
index 849c8670583d4aa8ea1cbd920b9ba8ab976c515f,d0036e52a24a1c28eaef9e60bb5aa100aaa18af1..3cd70cf91fdebf9612a772070aeae3e94cd7672b
@@@ -336,6 -336,9 +336,9 @@@ extern unsigned long sysctl_hung_task_w
  extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
                                         void __user *buffer,
                                         size_t *lenp, loff_t *ppos);
+ #else
+ /* Avoid need for ifdefs elsewhere in the code */
+ enum { sysctl_hung_task_timeout_secs = 0 };
  #endif
  
  /* Attach to any functions which should be ignored in wchan output. */
@@@ -623,6 -626,10 +626,10 @@@ struct signal_struct 
  
        int oom_adj;            /* OOM kill score adjustment (bit shift) */
        int oom_score_adj;      /* OOM kill score adjustment */
+       struct mutex cred_guard_mutex;  /* guard against foreign influences on
+                                        * credential calculations
+                                        * (notably. ptrace) */
  };
  
  /* Context switch must be unlocked if interrupts are to be enabled */
@@@ -665,6 -672,9 +672,9 @@@ struct user_struct 
        atomic_t inotify_watches; /* How many inotify watches does this user have? */
        atomic_t inotify_devs;  /* How many inotify devs does this user have opened? */
  #endif
+ #ifdef CONFIG_FANOTIFY
+       atomic_t fanotify_listeners;
+ #endif
  #ifdef CONFIG_EPOLL
        atomic_t epoll_watches; /* The number of file descriptors currently watched */
  #endif
@@@ -1073,7 -1083,7 +1083,7 @@@ struct sched_class 
                                         struct task_struct *task);
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
-       void (*moved_group) (struct task_struct *p, int on_rq);
+       void (*task_move_group) (struct task_struct *p, int on_rq);
  #endif
  };
  
@@@ -1302,9 -1312,6 +1312,6 @@@ struct task_struct 
                                         * credentials (COW) */
        const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
-       struct mutex cred_guard_mutex;  /* guard against foreign influences on
-                                        * credential calculations
-                                        * (notably. ptrace) */
        struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
  
        char comm[TASK_COMM_LEN]; /* executable name excluding path
@@@ -1703,7 -1710,6 +1710,6 @@@ extern void thread_group_times(struct t
  #define PF_DUMPCORE   0x00000200      /* dumped core */
  #define PF_SIGNALED   0x00000400      /* killed by a signal */
  #define PF_MEMALLOC   0x00000800      /* Allocating memory */
- #define PF_FLUSHER    0x00001000      /* responsible for disk writeback */
  #define PF_USED_MATH  0x00002000      /* if unset the fpu must be initialized before use */
  #define PF_FREEZING   0x00004000      /* freeze in progress. do not account to load */
  #define PF_NOFREEZE   0x00008000      /* this thread should not be frozen */
@@@ -1942,10 -1948,9 +1948,10 @@@ extern int task_nice(const struct task_
  extern int can_nice(const struct task_struct *p, const int nice);
  extern int task_curr(const struct task_struct *p);
  extern int idle_cpu(int cpu);
 -extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
 +extern int sched_setscheduler(struct task_struct *, int,
 +                            const struct sched_param *);
  extern int sched_setscheduler_nocheck(struct task_struct *, int,
 -                                    struct sched_param *);
 +                                    const struct sched_param *);
  extern struct task_struct *idle_task(int cpu);
  extern struct task_struct *curr_task(int cpu);
  extern void set_curr_task(int cpu, struct task_struct *p);
@@@ -2235,9 -2240,16 +2241,16 @@@ static inline void task_unlock(struct t
        spin_unlock(&p->alloc_lock);
  }
  
- extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
                                                        unsigned long *flags);
  
+ #define lock_task_sighand(tsk, flags)                                 \
+ ({    struct sighand_struct *__ss;                                    \
+       __cond_lock(&(tsk)->sighand->siglock,                           \
+                   (__ss = __lock_task_sighand(tsk, flags)));          \
+       __ss;                                                           \
+ })                                                                    \
  static inline void unlock_task_sighand(struct task_struct *tsk,
                                                unsigned long *flags)
  {
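
The lock_task_sighand() macro introduced above only exists so sparse's __cond_lock() annotation can track the conditionally taken siglock; callers use it exactly like the old function. A minimal usage sketch (hypothetical caller, not part of this merge):

	static void frob_signals(struct task_struct *tsk)
	{
		unsigned long flags;

		if (!lock_task_sighand(tsk, &flags))
			return;			/* task is exiting, no sighand left */

		/* tsk->sighand and tsk->signal are stable here */

		unlock_task_sighand(tsk, &flags);
	}
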
diff --combined kernel/irq/manage.c
index 850f030fa0c23de862a9c5923de9f1a8daf24364,5f92acc5f952e0afb0489017c265a943a4a7d464..91a5fa25054e1d14d62339749f3229fae49f3766
@@@ -324,6 -324,10 +324,10 @@@ void enable_irq(unsigned int irq
        if (!desc)
                return;
  
+       if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
+           KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+               return;
        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq, false);
@@@ -573,9 -577,7 +577,9 @@@ irq_thread_check_affinity(struct irq_de
   */
  static int irq_thread(void *data)
  {
 -      struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
 +      static struct sched_param param = {
 +              .sched_priority = MAX_USER_RT_PRIO/2,
 +      };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        int wake, oneshot = desc->status & IRQ_ONESHOT;
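
The WARN() added to enable_irq() above rejects enables on an interrupt line whose chip has not been set up yet ("enable_irq before setup/request_irq"). A hedged sketch of the pattern that remains legal (hypothetical driver code, names made up):

	static void mydev_reprogram(unsigned int irq)
	{
		disable_irq(irq);	/* line was set up and request_irq()'d earlier */
		/* ... reconfigure the device while the irq is masked ... */
		enable_irq(irq);	/* balanced re-enable: chip present, no warning */
	}
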
diff --combined kernel/sched.c
index 51944e8c38a8962c19cce561343ab0d6e5c04e76,aa14a56f9d037cde21ae19ef86b142e1c23fe736..41f18695b730071c2cb6e7abbcf852f283832b78
@@@ -4701,7 -4701,7 +4701,7 @@@ static bool check_same_owner(struct tas
  }
  
  static int __sched_setscheduler(struct task_struct *p, int policy,
 -                              struct sched_param *param, bool user)
 +                              const struct sched_param *param, bool user)
  {
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        unsigned long flags;
@@@ -4856,7 -4856,7 +4856,7 @@@ recheck
   * NOTE that the task may be already dead.
   */
  int sched_setscheduler(struct task_struct *p, int policy,
 -                     struct sched_param *param)
 +                     const struct sched_param *param)
  {
        return __sched_setscheduler(p, policy, param, true);
  }
@@@ -4874,7 -4874,7 +4874,7 @@@ EXPORT_SYMBOL_GPL(sched_setscheduler)
   * but our caller might not have that capability.
   */
  int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 -                             struct sched_param *param)
 +                             const struct sched_param *param)
  {
        return __sched_setscheduler(p, policy, param, false);
  }
@@@ -8510,12 -8510,12 +8510,12 @@@ void sched_move_task(struct task_struc
        if (unlikely(running))
                tsk->sched_class->put_prev_task(rq, tsk);
  
-       set_task_rq(tsk, task_cpu(tsk));
  #ifdef CONFIG_FAIR_GROUP_SCHED
-       if (tsk->sched_class->moved_group)
-               tsk->sched_class->moved_group(tsk, on_rq);
+       if (tsk->sched_class->task_move_group)
+               tsk->sched_class->task_move_group(tsk, on_rq);
+       else
  #endif
+               set_task_rq(tsk, task_cpu(tsk));
  
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
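
With this reordering, a class that provides task_move_group() becomes responsible for calling set_task_rq() itself, at the point where its own bookkeeping is consistent; only classes without the hook fall back to the bare set_task_rq(). A rough sketch of what the fair-class hook does under CONFIG_FAIR_GROUP_SCHED (the sched_fair.c side is not part of this combined diff, so treat the details as an assumption):

	static void task_move_group_fair(struct task_struct *p, int on_rq)
	{
		/*
		 * A task that is not on a runqueue carries an absolute vruntime;
		 * make it relative to the old group before switching cfs_rq and
		 * absolute again afterwards, so it is placed fairly in the new group.
		 */
		if (!on_rq)
			p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
		set_task_rq(p, task_cpu(p));
		if (!on_rq)
			p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
	}
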
diff --combined kernel/softirq.c
index 081869ed3a9f9971bbf346cbb6c5db3cf797c7f2,18f4be0d5fe0bbf853935972d9b441e95bc61c5a..d4d918a91881407acd8abbde6691f77197cd013c
@@@ -67,7 -67,7 +67,7 @@@ char *softirq_to_name[NR_SOFTIRQS] = 
   * to the pending events, so lets the scheduler to balance
   * the softirq load for us.
   */
- void wakeup_softirqd(void)
+ static void wakeup_softirqd(void)
  {
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);
@@@ -229,18 -229,20 +229,20 @@@ restart
  
        do {
                if (pending & 1) {
+                       unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();
-                       kstat_incr_softirqs_this_cpu(h - softirq_vec);
  
-                       trace_softirq_entry(h, softirq_vec);
+                       kstat_incr_softirqs_this_cpu(vec_nr);
+                       trace_softirq_entry(vec_nr);
                        h->action(h);
-                       trace_softirq_exit(h, softirq_vec);
+                       trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
-                               printk(KERN_ERR "huh, entered softirq %td %s %p"
+                               printk(KERN_ERR "huh, entered softirq %u %s %p"
                                       "with preempt_count %08x,"
-                                      " exited with %08x?\n", h - softirq_vec,
-                                      softirq_to_name[h - softirq_vec],
-                                      h->action, prev_count, preempt_count());
+                                      " exited with %08x?\n", vec_nr,
+                                      softirq_to_name[vec_nr], h->action,
+                                      prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }
  
@@@ -851,9 -853,7 +853,9 @@@ static int __cpuinit cpu_callback(struc
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
 -              struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 +              static struct sched_param param = {
 +                      .sched_priority = MAX_RT_PRIO-1
 +              };
  
                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
diff --combined kernel/watchdog.c
index 94ca779aa9c24dfdc5b08a51694f1fb7a789179b,6e3c41a4024c1cc66be01218e2c37498498f2469..14b8120d52320486c3006e0cffbc7c9fff84de15
@@@ -43,7 -43,7 +43,7 @@@ static DEFINE_PER_CPU(unsigned long, hr
  static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
  #endif
  
- static int __initdata no_watchdog;
+ static int no_watchdog;
  
  
  /* boot commands */
@@@ -307,7 -307,7 +307,7 @@@ static enum hrtimer_restart watchdog_ti
   */
  static int watchdog(void *unused)
  {
 -      struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 +      static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
  
        sched_setscheduler(current, SCHED_FIFO, &param);
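
Taken together with the sched.h and sched.c hunks above, sched_setscheduler() and sched_setscheduler_nocheck() now take a const struct sched_param *, so shared or static parameter blocks like the ones the irq_thread(), ksoftirqd and watchdog changes in this merge introduce can be passed without the callee being able to modify them. A minimal caller sketch under the new prototypes (hypothetical helper, not from this merge):

	static const struct sched_param fifo_param = {
		.sched_priority = MAX_RT_PRIO - 1,
	};

	static void make_kthread_fifo(struct task_struct *p)
	{
		/* _nocheck: in-kernel caller, no capability check needed */
		sched_setscheduler_nocheck(p, SCHED_FIFO, &fifo_param);
	}
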