author    | Ingo Molnar <mingo@elte.hu> | 2008-10-24 12:48:46 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-24 12:48:46 +0200
commit    | 8c82a17e9c924c0e9f13e75e4c2f6bca19a4b516 (patch)
tree      | d535f46a917e14e90deccb29ad00aac016ad18dd /include/linux/sched.h
parent    | 4ce72a2c063a7fa8e42a9435440ae3364115a58d (diff)
parent    | 57f8f7b60db6f1ed2c6918ab9230c4623a9dbe37 (diff)
Merge commit 'v2.6.28-rc1' into sched/urgent
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 107
1 file changed, 88 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c05b45faef18..b483f39a7112 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -402,12 +402,21 @@ extern int get_dumpable(struct mm_struct *mm);
 #define MMF_DUMP_MAPPED_PRIVATE	4
 #define MMF_DUMP_MAPPED_SHARED	5
 #define MMF_DUMP_ELF_HEADERS	6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED  8
 #define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS	5
+#define MMF_DUMP_FILTER_BITS	7
 #define MMF_DUMP_FILTER_MASK \
 	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 #define MMF_DUMP_FILTER_DEFAULT \
-	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED))
+	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
+	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF	0
+#endif
 
 struct sighand_struct {
 	atomic_t		count;
@@ -424,6 +433,39 @@ struct pacct_struct {
 	unsigned long		ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:		time spent in user mode, in &cputime_t units
+ * @stime:		time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp	stime
+#define virt_exp	utime
+#define sched_exp	sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:		thread group interval timers; substructure for
+ *			uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+	struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -469,6 +511,17 @@ struct signal_struct {
 	cputime_t it_prof_expires, it_virt_expires;
 	cputime_t it_prof_incr, it_virt_incr;
 
+	/*
+	 * Thread group totals for process CPU clocks.
+	 * See thread_group_cputime(), et al, for details.
+	 */
+	struct thread_group_cputime cputime;
+
+	/* Earliest-expiration cache. */
+	struct task_cputime cputime_expires;
+
+	struct list_head cpu_timers[3];
+
 	/* job control IDs */
 
 	/*
@@ -499,7 +552,7 @@ struct signal_struct {
 	 * Live threads maintain their own counters and add to these
 	 * in __exit_signal, except for the group leader.
 	 */
-	cputime_t utime, stime, cutime, cstime;
+	cputime_t cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -508,14 +561,6 @@ struct signal_struct {
 	struct task_io_accounting ioac;
 
 	/*
-	 * Cumulative ns of scheduled CPU time for dead threads in the
-	 * group, not including a zombie group leader.  (This only differs
-	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
-	 * other than jiffies.)
-	 */
-	unsigned long long sum_sched_runtime;
-
-	/*
 	 * We don't bother to synchronize most readers of this at all,
 	 * because there is no reader checking a limit that actually needs
 	 * to get both rlim_cur and rlim_max atomically, and either one
@@ -526,8 +571,6 @@ struct signal_struct {
 	 */
 	struct rlimit rlim[RLIM_NLIMITS];
 
-	struct list_head cpu_timers[3];
-
 	/* keep the process-shared keyrings here so that they do the right
 	 * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -637,10 +680,6 @@ struct sched_info {
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
-#ifdef CONFIG_SCHEDSTATS
-extern const struct file_operations proc_schedstat_operations;
-#endif /* CONFIG_SCHEDSTATS */
-
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
 	spinlock_t	lock;
@@ -1138,8 +1177,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
-	cputime_t it_prof_expires, it_virt_expires;
-	unsigned long long it_sched_expires;
+	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -1305,6 +1343,12 @@ struct task_struct {
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
 #endif
+	/*
+	 * time slack values; these are used to round up poll() and
+	 * select() etc timeout values. These are in nanoseconds.
+	 */
+	unsigned long timer_slack_ns;
+	unsigned long default_timer_slack_ns;
 };
 
 /*
@@ -1589,6 +1633,7 @@ extern unsigned long long cpu_clock(int cpu);
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -2087,6 +2132,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 }
 
 /*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+	sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+	if (curr->signal->cputime.totals)
+		return 0;
+	return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+	free_percpu(sig->cputime.totals);
+}
+
+/*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
  * This is required every time the blocked sigset_t changes.
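For orientation only, here is a minimal sketch (not part of the patch) of how the thread-group CPU time interface declared above is meant to be consumed from elsewhere in the kernel: thread_group_cputime() fills a struct task_cputime with the group-wide sums. The function name example_group_runtime() below is invented for illustration.

```c
/*
 * Illustrative sketch only, assuming kernel-internal context;
 * example_group_runtime() is an invented name, not part of this patch.
 */
#include <linux/sched.h>

static unsigned long long example_group_runtime(struct task_struct *tsk)
{
	struct task_cputime times;

	/* Fill 'times' with utime/stime/sum_exec_runtime summed over
	 * every thread in tsk's thread group. */
	thread_group_cputime(tsk, &times);

	/* sum_exec_runtime is in nanoseconds, per the kerneldoc above. */
	return times.sum_exec_runtime;
}
```

Lifetime management pairs with the other new inlines: thread_group_cputime_init() clears the totals pointer when a signal_struct is set up, thread_group_cputime_clone_thread() allocates the per-cpu totals the first time a thread is cloned into the group, and thread_group_cputime_free() releases them via free_percpu().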
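The other user-visible addition is the timer_slack_ns / default_timer_slack_ns pair, which the poll()/select() timeout paths consult when rounding up wakeups. A minimal userspace sketch, assuming the PR_SET_TIMERSLACK / PR_GET_TIMERSLACK prctl options from the same 2.6.28 series are available on the running kernel:

```c
/* Sketch: adjusting the per-task timer slack behind timer_slack_ns.
 * Assumes the running kernel provides PR_SET_TIMERSLACK/PR_GET_TIMERSLACK. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TIMERSLACK
#define PR_SET_TIMERSLACK 29
#endif
#ifndef PR_GET_TIMERSLACK
#define PR_GET_TIMERSLACK 30
#endif

int main(void)
{
	/* Let the kernel round up this task's poll()/select() timeouts
	 * by up to 1 ms (value is in nanoseconds). */
	if (prctl(PR_SET_TIMERSLACK, 1000000UL, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_TIMERSLACK)");
		return 1;
	}

	/* PR_GET_TIMERSLACK reports the current slack, in nanoseconds. */
	printf("timer slack is now %d ns\n", prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
	return 0;
}
```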