Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	205
1 file changed, 121 insertions(+), 84 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 55e30d114477..4cae9b81a1f8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
+struct bts_tracer;
/*
* List of flags we want to share for kernel threads,
@@ -249,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(void);
extern void task_rq_unlock_wait(struct task_struct *p);
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
#else
@@ -259,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
}
#endif
-extern unsigned long rt_needs_cpu(int cpu);
-
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
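
The nohz mask becomes a cpumask_var_t, which is a real pointer when
CONFIG_CPUMASK_OFFSTACK=y and therefore needs explicit allocation. A
minimal sketch of the usual pattern (hypothetical caller, standard
cpumask API):

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;	/* only a real allocation when OFFSTACK=y */
	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	/* ... use mask ... */
	free_cpumask_var(mask);
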
@@ -285,7 +284,6 @@ long io_schedule_timeout(long timeout);
extern void cpu_init (void);
extern void trap_init(void);
-extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user);
extern void scheduler_tick(void);
@@ -388,6 +386,9 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
(mm)->hiwater_vm = (mm)->total_vm; \
} while (0)
+#define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm))
+#define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm)
+
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
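
The new hiwater helpers fold the current counter into the recorded
high-water mark, so readers always see the true peak. A hedged sketch of
how a /proc-style reporter might use them (hypothetical caller holding a
valid mm_struct):

	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
	unsigned long hiwater_vm  = get_mm_hiwater_vm(mm);

	/* convert pages to kB, as fs/proc does */
	printk("VmHWM:\t%8lu kB\n", hiwater_rss << (PAGE_SHIFT - 10));
	printk("VmPeak:\t%8lu kB\n", hiwater_vm << (PAGE_SHIFT - 10));
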
@@ -572,12 +573,6 @@ struct signal_struct {
*/
struct rlimit rlim[RLIM_NLIMITS];
- /* keep the process-shared keyrings here so that they do the right
- * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
- struct key *session_keyring; /* keyring inherited over fork */
- struct key *process_keyring; /* keyring private to this process */
-#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pacct_struct pacct; /* per-process accounting information */
#endif
@@ -648,6 +643,7 @@ struct user_struct {
/* Hash table maintenance information */
struct hlist_node uidhash_node;
uid_t uid;
+ struct user_namespace *user_ns;
#ifdef CONFIG_USER_SCHED
struct task_group *tg;
@@ -665,6 +661,7 @@ extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
+
struct backing_dev_info;
struct reclaim_state;
@@ -672,8 +669,7 @@ struct reclaim_state;
struct sched_info {
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
- unsigned long long cpu_time, /* time spent on the cpu */
- run_delay; /* time spent waiting on a runqueue */
+ unsigned long long run_delay; /* time spent waiting on a runqueue */
/* timestamps */
unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -764,20 +760,51 @@ enum cpu_idle_type {
#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
-#define BALANCE_FOR_MC_POWER \
- (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+enum powersavings_balance_level {
+ POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
+ POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package
+ * first for long running threads
+ */
+ POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle
+ * cpu package for power savings
+ */
+ MAX_POWERSAVINGS_BALANCE_LEVELS
+};
+
+extern int sched_mc_power_savings, sched_smt_power_savings;
+
+static inline int sd_balance_for_mc_power(void)
+{
+ if (sched_smt_power_savings)
+ return SD_POWERSAVINGS_BALANCE;
-#define BALANCE_FOR_PKG_POWER \
- ((sched_mc_power_savings || sched_smt_power_savings) ? \
- SD_POWERSAVINGS_BALANCE : 0)
+ return 0;
+}
-#define test_sd_parent(sd, flag) ((sd->parent && \
- (sd->parent->flags & flag)) ? 1 : 0)
+static inline int sd_balance_for_package_power(void)
+{
+ if (sched_mc_power_savings | sched_smt_power_savings)
+ return SD_POWERSAVINGS_BALANCE;
+ return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+ if (sched_mc_power_savings | sched_smt_power_savings)
+ return SD_BALANCE_NEWIDLE;
+
+ return 0;
+}
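
The BALANCE_FOR_{MC,PKG}_POWER macros become typed inline helpers, so a
topology setup path can simply OR their results into a domain's flags.
An illustrative sketch (not the actual initializer):

	/* Illustrative only: composing SD flags from the new helpers. */
	static void sketch_domain_power_flags(struct sched_domain *sd)
	{
		sd->flags |= sd_balance_for_mc_power();	/* SD_POWERSAVINGS_BALANCE or 0 */
		sd->flags |= sd_power_saving_flags();	/* SD_BALANCE_NEWIDLE or 0 */
	}
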
struct sched_group {
struct sched_group *next; /* Must be a circular list */
- cpumask_t cpumask;
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -790,8 +817,15 @@ struct sched_group {
* (see include/linux/reciprocal_div.h)
*/
u32 reciprocal_cpu_power;
+
+ unsigned long cpumask[];
};
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+ return to_cpumask(sg->cpumask);
+}
+
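
struct sched_group now carries its mask as a trailing variable-length
array, sized at allocation time instead of by the compile-time NR_CPUS.
A hedged sketch of the matching allocation (hypothetical, following the
kernel's usual pattern for such structs):

	struct sched_group *sg;

	/* one allocation covers the struct plus a mask; cpumask_size()
	 * tracks nr_cpu_ids when CONFIG_CPUMASK_OFFSTACK=y */
	sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;
	cpumask_set_cpu(cpu, sched_group_cpus(sg));
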
enum sched_domain_level {
SD_LV_NONE = 0,
SD_LV_SIBLING,
@@ -815,7 +849,6 @@ struct sched_domain {
struct sched_domain *parent; /* top domain must be null terminated */
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
- cpumask_t span; /* span of all CPUs in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
@@ -870,56 +903,41 @@ struct sched_domain {
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
+
+ /* span of all CPUs in this domain */
+ unsigned long span[];
};
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+ return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new);
-extern int arch_reinit_sched_domains(void);
+
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+ if (sd->parent && (sd->parent->flags & flag))
+ return 1;
+
+ return 0;
+}
#else /* CONFIG_SMP */
struct sched_domain_attr;
static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new)
{
}
#endif /* !CONFIG_SMP */
struct io_context; /* See blkdev.h */
-#define NGROUPS_SMALL 32
-#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
- int ngroups;
- atomic_t usage;
- gid_t small_block[NGROUPS_SMALL];
- int nblocks;
- gid_t *blocks[0];
-};
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current. The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task. Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
- atomic_inc(&(group_info)->usage); \
-} while (0)
-
-#define put_group_info(group_info) do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
- groups_free(group_info); \
-} while (0)
-
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
- ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
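
struct group_info and its helpers are not deleted outright; they move to
the new credentials code in include/linux/cred.h as part of the COW
credential rework. A hedged sketch of the replacement access pattern,
assuming the cred API that lands with this series:

	const struct cred *cred = current_cred();	/* no lock needed for current */
	int i;

	for (i = 0; i < cred->group_info->ngroups; i++)
		printk(KERN_DEBUG "gid %d\n", GROUP_AT(cred->group_info, i));
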
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
@@ -963,7 +981,7 @@ struct sched_class {
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
- const cpumask_t *newmask);
+ const struct cpumask *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
@@ -1165,6 +1183,19 @@ struct task_struct {
struct list_head ptraced;
struct list_head ptrace_entry;
+#ifdef CONFIG_X86_PTRACE_BTS
+ /*
+ * This is the tracer handle for the ptrace BTS extension.
+ * This field actually belongs to the ptracer task.
+ */
+ struct bts_tracer *bts;
+ /*
+ * The buffer to hold the BTS data.
+ */
+ void *bts_buffer;
+ size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
@@ -1186,17 +1217,12 @@ struct task_struct {
struct list_head cpu_timers[3];
/* process credentials */
- uid_t uid,euid,suid,fsuid;
- gid_t gid,egid,sgid,fsgid;
- struct group_info *group_info;
- kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
- struct user_struct *user;
- unsigned securebits;
-#ifdef CONFIG_KEYS
- unsigned char jit_keyring; /* default keyring to attach requested keys to */
- struct key *request_key_auth; /* assumed request_key authority */
- struct key *thread_keyring; /* keyring private to this thread */
-#endif
+ const struct cred *real_cred; /* objective and real subjective task
+ * credentials (COW) */
+ const struct cred *cred; /* effective (overridable) subjective task
+ * credentials (COW) */
+ struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */
+
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
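
With the monolithic uid/gid/capability fields replaced by two
copy-on-write cred pointers, reading another task's credentials goes
through RCU instead of direct field access. A minimal sketch, assuming
the accessors introduced alongside this change:

	uid_t uid;

	rcu_read_lock();
	uid = __task_cred(p)->uid;	/* p's objective credentials */
	rcu_read_unlock();
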
@@ -1233,9 +1259,6 @@ struct task_struct {
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
- void *security;
-#endif
struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
uid_t loginuid;
@@ -1356,6 +1379,23 @@ struct task_struct {
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Index of current stored address in ret_stack */
+ int curr_ret_stack;
+ /* Stack of return addresses for return function tracing */
+ struct ftrace_ret_stack *ret_stack;
+ /*
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+ atomic_t trace_overrun;
+ /* Pause for the tracing */
+ atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+ /* state flags for use by tracers */
+ unsigned long trace;
+#endif
};
/*
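
Conceptually, the function graph tracer gives each task a shadow stack:
an entry is pushed at function entry and popped at return, with
curr_ret_stack as the index. An illustrative push, not the actual
ftrace code:

	/* Illustrative only: the shape of the shadow-stack push. */
	static int sketch_push_return_trace(struct task_struct *t,
					    unsigned long ret, unsigned long func)
	{
		if (t->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
			atomic_inc(&t->trace_overrun);	/* too deep: count and bail */
			return -EBUSY;
		}
		t->curr_ret_stack++;
		t->ret_stack[t->curr_ret_stack].ret = ret;
		t->ret_stack[t->curr_ret_stack].func = func;
		return 0;
	}
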
@@ -1594,12 +1634,12 @@ extern cputime_t task_gtime(struct task_struct *p);
#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask);
+ const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask)
+ const struct cpumask *new_mask)
{
- if (!cpu_isset(0, *new_mask))
+ if (!cpumask_test_cpu(0, new_mask))
return -EINVAL;
return 0;
}
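
Callers now pass struct cpumask pointers, which lets them use the
read-only per-CPU masks directly. A minimal usage sketch (hypothetical
caller):

	/* Hypothetical: pin a task to CPU 0 via the pointer-based API. */
	if (set_cpus_allowed_ptr(tsk, cpumask_of(0)))
		printk(KERN_WARNING "could not pin task to CPU 0\n");
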
@@ -1666,16 +1706,16 @@ extern void wake_up_idle_cpu(int cpu);
static inline void wake_up_idle_cpu(int cpu) { }
#endif
-#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_shares_ratelimit;
-extern unsigned int sysctl_sched_shares_thresh;
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
@@ -1775,7 +1815,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
return u;
}
extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);
#include <asm/current.h>
@@ -1794,9 +1833,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
@@ -1928,6 +1964,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
+extern bool is_single_threaded(struct task_struct *);
+
/*
* Careful: do_each_thread/while_each_thread is a double loop so
* 'break' will not work as expected - use goto instead.
@@ -2212,10 +2250,8 @@ __trace_special(void *__tr, void *__data,
}
#endif
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-
-extern int sched_mc_power_savings, sched_smt_power_savings;
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
extern void normalize_rt_tasks(void);
@@ -2224,6 +2260,7 @@ extern void normalize_rt_tasks(void);
extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
#endif
extern struct task_group *sched_create_group(struct task_group *parent);