sched: Remove pointless in_atomic() definition check
include/linux/sched.h
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL			0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM		0x00000100	/* set if VM shared between processes */
#define CLONE_FS		0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES		0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND		0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE		0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK		0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT		0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD		0x00010000	/* Same thread group? */
#define CLONE_NEWNS		0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM		0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS		0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
   and is now available for re-use. */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */

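/*
 * Illustrative only (not part of this header): userspace combines these
 * flags when calling clone(2). A minimal sketch of a thread-like child,
 * assuming glibc's clone() wrapper and a heap-allocated 64 KiB stack
 * (the stack grows down on most architectures, so pass the top):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdlib.h>
 *
 *	static int child_fn(void *arg) { return 0; }
 *
 *	int spawn_thread_like(void)
 *	{
 *		size_t sz = 64 * 1024;
 *		char *stack = malloc(sz);
 *		return clone(child_fn, stack + sz,
 *			     CLONE_VM | CLONE_FS | CLONE_FILES |
 *			     CLONE_SIGHAND | CLONE_THREAD, NULL);
 *	}
 */
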
/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK	0x40000000

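/*
 * Illustrative only: SCHED_RESET_ON_FORK is ORed into the policy passed to
 * sched_setscheduler(2), so children of a realtime task start back at
 * SCHED_NORMAL instead of inheriting SCHED_FIFO. A minimal sketch:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 */
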
#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

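/*
 * Illustrative worked step of CALC_LOAD, assuming one runnable task
 * (n = FIXED_1 = 2048), a previous 1-minute average of 0.50
 * (load = 1024), and exp = EXP_1 = 1884:
 *
 *	load *= 1884;			-> 1929216
 *	load += 2048 * (2048 - 1884);	-> 1929216 + 335872 = 2265088
 *	load >>= 11;			-> 1106, i.e. 1106/2048 ~= 0.54
 *
 * so every LOAD_FREQ the average decays toward the instantaneous value
 * by a factor of exp(-5s/60s).
 */
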
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_STATE_MAX		512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FREEZING) == 0)

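/*
 * Illustrative only: TASK_KILLABLE lets a sleeper ignore every signal
 * except fatal ones, e.g. via wait_event_killable() from <linux/wait.h>:
 *
 *	err = wait_event_killable(wq, condition);
 *	if (err)
 *		return err;	(-ERESTARTSYS: woken by a fatal signal)
 */
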
#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)	\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

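/*
 * Illustrative only: the canonical sleep pattern re-checks its condition in
 * a loop, because a wakeup can arrive between the state write and
 * schedule(). A minimal sketch, assuming the waker sets `cond` before
 * calling wake_up():
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */
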
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);

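/*
 * Illustrative only: schedule_timeout() sleeps in whatever state the task
 * set beforehand, so callers pair it with set_current_state(). A minimal
 * sketch that sleeps for up to one second in process context:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	(jiffies left; 0 on timeout)
 */
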
struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can overwrite this number via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per vma. In ELF, the number of sections is represented as an
 * unsigned short, so it must stay below 65535 at coredump time. Because the
 * kernel adds some informative sections to the program image when generating
 * the coredump, we need some margin. The number of extra sections is
 * currently 1-3 and depends on the arch; "5" is used as a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

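/*
 * Illustrative only: these are bit numbers within mm->flags, so a coredump
 * filter test is a plain bit test, along the lines of what fs/binfmt_elf.c
 * does per vma:
 *
 *	int dump_anon = !!(mm->flags & (1UL << MMF_DUMP_ANON_PRIVATE));
 */
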
struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 *			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
	 * values are defined to 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of schedule CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * The threadgroup_fork_lock prevents threads from forking with
	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
	 * threadgroup-wide operations. It's taken for reading in fork.c in
	 * copy_process().
	 * Currently only needed write-side by cgroups.
	 */
	struct rw_semaphore threadgroup_fork_lock;
#endif

	int oom_adj;		/* OOM kill score adjustment (bit shift) */
	int oom_score_adj;	/* OOM kill score adjustment */
	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
				 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if BITS_PER_LONG > 32
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

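/*
 * Illustrative only: on a 64-bit build a nice-0 weight of 1024 is stored as
 * scale_load(1024) = 1024 << 10 = 1048576, and scale_load_down() recovers
 * the user-visible 1024; on 32-bit both macros are the identity, so
 * SCHED_LOAD_SCALE is 1024 there and 1048576 on 64-bit.
 */
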
/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	if (!sched_mc_power_savings)
		return SD_PREFER_SIBLING;

	return 0;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

extern int __weak arch_sd_sibiling_asym_packing(void);

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int cpu_power, cpu_power_orig;
	unsigned int group_weight;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

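/*
 * Illustrative only: because cpumask[] is a zero-length trailing array, an
 * allocation reserves the mask storage inline, roughly:
 *
 *	struct sched_group *sg;
 *
 *	sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
 */
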
static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed;	/* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */

struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x04		/* internal use, task got migrated */

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif

#define DEQUEUE_SLEEP		1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			      int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

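/*
 * Illustrative sketch (see kernel/sched.c): the core scheduler walks this
 * singly-linked list in priority order, so picking the next runnable task
 * is roughly:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for (class = sched_class_highest; class; class = class->next) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */
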
struct load_weight {
	unsigned long weight, inv_weight;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct task_struct *wake_entry;
	int on_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * that the FPU is used. If this is over a threshold, the lazy fpu
	 * saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this is to deal with bursty apps that only use FPU for
	 * a short time
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */
	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;

	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->real_parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process */
	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw; /* context switch counts */
	struct timespec start_time;		/* monotonic time */
	struct timespec real_start_time;	/* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	const struct cred __rcu *real_cred; /* objective and real subjective task
					 * credentials (COW) */
	const struct cred __rcu *cred;	/* effective (overridable) subjective task
					 * credentials (COW) */
	struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */

	char comm[TASK_COMM_LEN]; /* executable name excluding path
				     - access with [gs]et_task_comm (which lock
				       it with task_lock())
				     - initialized normally by setup_new_exec */
/* file system info */
	int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
	struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
	unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespaces */
	struct nsproxy *nsproxy;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;
	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	uid_t loginuid;
	unsigned int sessionid;
#endif
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
	spinlock_t alloc_lock;

#ifdef CONFIG_GENERIC_HARDIRQS
	/* IRQ handler threads */
	struct irqaction *irqaction;
#endif

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task */
	struct plist_head pi_waiters;
	/* Deadlock detection and priority inheritance handling */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif

/* journalling filesystem info */
	void *journal_info;

/* stacked block device info */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
	struct blk_plug *plug;
#endif

/* VM state */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo; /* For ptrace use.  */
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	cputime_t acct_timexpd;	/* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;	/* Protected by alloc_lock */
	int mems_allowed_change_disable;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock */
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
	short il_next;
	short pref_node_fork;
#endif
	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
#ifdef	CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif
	struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	unsigned long timer_slack_ns;
	unsigned long default_timer_slack_ns;

	struct list_head	*scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack */
	int curr_ret_stack;
	/* Stack of return addresses for return function tracing */
	struct ftrace_ret_stack	*ret_stack;
	/* time stamp for last schedule */
	unsigned long long ftrace_timestamp;
	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun.
	 */
	atomic_t trace_overrun;
	/* Pause for the tracing */
	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	/* state flags for use by tracers */
	unsigned long trace;
	/* bitmask of trace recursion */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
	struct memcg_batch_info {
		int do_batch;	/* incremented when batch uncharge started */
		struct mem_cgroup *memcg; /* target memcg of uncharge */
		unsigned long nr_pages;	/* uncharged usage */
		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
	} memcg_batch;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_t ptrace_bp_refcnt;
#endif
};

/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)

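/*
 * Illustrative sketch (not part of this header): a nice value in
 * [-20, 19] lands in the non-RT band as DEFAULT_PRIO + nice, i.e.
 * 100..139, mirroring the scheduler's NICE_TO_PRIO() conversion.
 * The helper name is hypothetical.
 */
static inline int example_nice_to_prio(long nice)
{
	return DEFAULT_PRIO + nice;	/* nice 0 -> 120 */
}
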
static inline int rt_prio(int prio)
{
	if (unlikely(prio < MAX_RT_PRIO))
		return 1;
	return 0;
}

static inline int rt_task(struct task_struct *p)
{
	return rt_prio(p->prio);
}

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * set_task_vxid()   : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

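/*
 * Illustrative usage (a sketch, not kernel code): the same task has a
 * global pid and a namespace-relative one; the two helpers above make
 * the distinction explicit. Safe on current; other tasks need RCU or
 * tasklist protection.
 */
static inline void example_report_pids(struct task_struct *tsk)
{
	pid_t global = task_pid_nr(tsk);	/* as seen from init_pid_ns */
	pid_t virt = task_pid_vnr(tsk);		/* as seen from current's ns */

	printk(KERN_DEBUG "pid %d, vpid %d\n", global, virt);
}
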
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}

/*
 * is_container_init:
 * check whether the task is init in its own pid namespace.
 */
extern int is_container_init(struct task_struct *tsk);

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);

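/*
 * Illustrative pairing (a sketch): pin a task's usage count across a
 * region where it might otherwise be freed, then drop the reference.
 */
static inline void example_pin_task(struct task_struct *t)
{
	get_task_struct(t);		/* t->usage incremented */
	/* ... t stays valid here even if it exits meanwhile ... */
	put_task_struct(t);		/* may free t via __put_task_struct() */
}
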
/*
 * Per process flags
 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND	0x04000000	/* Thread bound to specific cpu */
#define PF_MCE_EARLY	0x08000000	/* Early kill for mce process policy */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
#define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

/*
 * task->group_stop flags
 */
#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */

extern void task_clear_group_stop_pending(struct task_struct *task);

#ifdef CONFIG_PREEMPT_RCU

#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */

static inline void rcu_copy_process(struct task_struct *p)
{
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special = 0;
#ifdef CONFIG_TREE_PREEMPT_RCU
	p->rcu_blocked_node = NULL;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	p->rcu_boost_mutex = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	INIT_LIST_HEAD(&p->rcu_node_entry);
}

#else

static inline void rcu_copy_process(struct task_struct *p)
{
}

#endif

#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
			       const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				      const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
	return set_cpus_allowed_ptr(p, &new_mask);
}
#endif

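/*
 * Illustrative sketch: restrict a task to one CPU with the interface
 * above; cpumask_of() comes from <linux/cpumask.h>.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
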
/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, and using it (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched_clock.c
 */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 sched_clock_cpu(int cpu);

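/*
 * Illustrative sketch: time a short section with local_clock(), one of
 * the recommended interfaces above (nanoseconds, local to this CPU, so
 * the section must not migrate; a preempt-disabled region would do).
 */
static inline u64 example_time_section(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();
	return local_clock() - t0;	/* elapsed ns, approximately */
}
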
extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable;

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif

extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
extern void wake_up_idle_cpu(int cpu);
#else
static inline void wake_up_idle_cpu(int cpu) { }
#endif

extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_child_runs_first;

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,
	SCHED_TUNABLESCALING_LOG,
	SCHED_TUNABLESCALING_LINEAR,
	SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;

#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
extern unsigned int sysctl_sched_shares_window;

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos);
#endif
#ifdef CONFIG_SCHED_DEBUG
static inline unsigned int get_sysctl_timer_migration(void)
{
	return sysctl_timer_migration;
}
#else
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
#endif
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;

int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;

extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif

#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
static inline int rt_mutex_getprio(struct task_struct *p)
{
	return p->normal_prio;
}
# define rt_mutex_adjust_pi(p)		do { } while (0)
#endif

extern bool yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

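/*
 * Illustrative sketch: switch a task to SCHED_FIFO via the declaration
 * above. The priority 50 is arbitrary; valid user RT priorities are
 * 1..MAX_USER_RT_PRIO-1.
 */
static inline int example_make_fifo(struct task_struct *p)
{
	struct sched_param param = { .sched_priority = 50 };

	return sched_setscheduler(p, SCHED_FIFO, &param);
}
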
/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

extern void __set_special_pids(struct pid *pid);

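/*
 * Illustrative sketch: the lookup helpers above must run under
 * rcu_read_lock(); take a reference before leaving the read-side
 * section if the task is used afterwards.
 */
static inline struct task_struct *example_get_task_by_vpid(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);	/* caller must put_task_struct(p) */
	rcu_read_unlock();
	return p;
}
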
/* per-UID process charging. */
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void sched_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
extern int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm);

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);

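/*
 * Illustrative pairing (a sketch): get_task_mm() either pins the mm or
 * returns NULL if it is already going away; every success must be
 * matched by mmput(). get_mm_rss() is assumed from <linux/mm.h>.
 */
static inline unsigned long example_task_rss(struct task_struct *tsk)
{
	struct mm_struct *mm = get_task_mm(tsk);
	unsigned long rss = 0;

	if (mm) {
		rss = get_mm_rss(mm);
		mmput(mm);
	}
	return rss;
}
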
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);

extern int do_execve(const char *,
		     const char __user * const __user *,
		     const char __user * const __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

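/*
 * Illustrative sketch: walk the global task list with the iterator
 * above; the walk must be covered by rcu_read_lock() or a read_lock
 * of tasklist_lock.
 */
static inline int example_count_tasks(void)
{
	struct task_struct *p;
	int nr = 0;

	rcu_read_lock();
	for_each_process(p)
		nr++;
	rcu_read_unlock();
	return nr;
}
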
static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)	(p == p->group_leader)

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline int has_group_leader_pid(struct task_struct *p)
{
	return p->pid == p->tgid;
}

static inline
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->tgid == p2->tgid;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

static inline int task_detached(struct task_struct *p)
{
	return p->exit_signal == -1;
}

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

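/*
 * Illustrative sketch: ->comm is one of the fields task_lock() pins,
 * so a stable copy is taken under the lock. Assumes TASK_COMM_LEN and
 * strncpy() from headers this file already pulls in elsewhere.
 */
static inline void example_copy_comm(struct task_struct *p, char *buf)
{
	task_lock(p);
	strncpy(buf, p->comm, TASK_COMM_LEN);
	task_unlock(p);
}
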
extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

#define lock_task_sighand(tsk, flags)					\
({	struct sighand_struct *__ss;					\
	__cond_lock(&(tsk)->sighand->siglock,				\
		    (__ss = __lock_task_sighand(tsk, flags)));		\
	__ss;								\
})								\

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}

/* See the declaration of threadgroup_fork_lock in signal_struct. */
#ifdef CONFIG_CGROUPS
static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
{
	down_read(&tsk->signal->threadgroup_fork_lock);
}
static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
{
	up_read(&tsk->signal->threadgroup_fork_lock);
}
static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
{
	down_write(&tsk->signal->threadgroup_fork_lock);
}
static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
{
	up_write(&tsk->signal->threadgroup_fork_lock);
}
#else
static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
#endif

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(task_thread_info(p) + 1);
}

#endif

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

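/*
 * Illustrative sketch: a wait loop that aborts once the caller has a
 * fatal (SIGKILL) signal queued, using the helper above;
 * schedule_timeout_interruptible() is declared elsewhere in this file.
 */
static inline int example_wait_until(int (*done)(void))
{
	while (!done()) {
		if (fatal_signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible(1);
	}
	return 0;
}
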
static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	__might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#ifdef CONFIG_PREEMPT
#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
#else
#define PREEMPT_LOCK_OFFSET	0
#endif

#define cond_resched_lock(lock) ({				\
	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

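/*
 * Illustrative sketch: break up a long loop at safe points with
 * cond_resched(), as described above; no locks may be held across it.
 */
static inline void example_long_scan(unsigned long *data, unsigned long n)
{
	unsigned long i;

	for (i = 0; i < n; i++) {
		data[i] = 0;
		if ((i & 1023) == 0)
			cond_resched();	/* may schedule */
	}
}
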
/*
 * Does a critical section need to be broken due to another
 * task waiting?  (technically this does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	spin_lock_init(&sig->cputimer.lock);
}

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_CGROUP_SCHED

extern struct task_group root_task_group;

extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
#ifdef CONFIG_FAIR_GROUP_SCHED
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
extern int sched_group_set_rt_runtime(struct task_group *tg,
				      long rt_runtime_us);
extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
				      long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}

static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
#endif /* CONFIG_MM_OWNER */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}

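/*
 * Illustrative sketch: check a requested size against the caller's
 * soft stack limit with the accessor above; RLIMIT_STACK comes from
 * the resource headers.
 */
static inline int example_stack_size_ok(unsigned long size)
{
	return size <= rlimit(RLIMIT_STACK);
}
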
#endif /* __KERNEL__ */

#endif