Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1a2ebd39b800..c147e7024f11 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -334,6 +334,14 @@ static inline void lockup_detector_init(void)
}
#endif
+#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND)
+void lockup_detector_bootcpu_resume(void);
+#else
+static inline void lockup_detector_bootcpu_resume(void)
+{
+}
+#endif
+
#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
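A note on the stub pattern introduced above: because an empty static inline is provided when either CONFIG_LOCKUP_DETECTOR or CONFIG_SUSPEND is disabled, callers do not need their own #ifdef around the hook. A minimal sketch of a hypothetical caller (resume_boot_cpu() is illustrative only and not part of this patch):

/* Illustrative only: the call optimizes away when the config options are off. */
static void resume_boot_cpu(void)
{
	/* ... architecture-specific resume work for the boot CPU ... */

	/* Re-enable the lockup detector on the boot CPU after resume. */
	lockup_detector_bootcpu_resume();
}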
@@ -406,6 +414,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
+/* get/set_dumpable() values */
+#define SUID_DUMPABLE_DISABLED 0
+#define SUID_DUMPABLE_ENABLED 1
+#define SUID_DUMPABLE_SAFE 2
+
/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
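The new SUID_DUMPABLE_* constants give names to the bare 0/1/2 values passed to set_dumpable() and returned by get_dumpable(). A sketch of how a caller might test them (the surrounding function is illustrative, not part of this patch):

/* Illustrative only: prefer the named constants over magic numbers. */
static int dump_allowed(struct mm_struct *mm)
{
	switch (get_dumpable(mm)) {
	case SUID_DUMPABLE_DISABLED:
		return 0;	/* no core dump at all */
	case SUID_DUMPABLE_SAFE:
		return 1;	/* dump, but with restricted ownership/permissions */
	case SUID_DUMPABLE_ENABLED:
	default:
		return 1;	/* normal core dump behaviour */
	}
}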
@@ -949,6 +962,7 @@ struct sched_domain {
unsigned int smt_gain;
int flags; /* See SD_* */
int level;
+ int idle_buddy; /* cpu assigned to select_idle_sibling() */
/* Runtime fields. */
unsigned long last_balance; /* init to jiffies. units in jiffies */
@@ -1244,6 +1258,9 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
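The new sched_task_group field gives the scheduler its own cached pointer to the task's group. Assuming the matching scheduler-side change, the task_group() accessor could then be as simple as the sketch below (illustrative, not shown in this hunk):

/* Sketch: return the pointer cached when the task is attached or moved. */
static inline struct task_group *task_group(struct task_struct *p)
{
	return p->sched_task_group;
}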
@@ -1567,7 +1584,7 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
struct memcg_batch_info {
int do_batch; /* incremented when batch uncharge started */
struct mem_cgroup *memcg; /* target memcg of uncharge */
@@ -1877,6 +1894,13 @@ static inline void rcu_copy_process(struct task_struct *p)
#endif
+static inline void tsk_restore_flags(struct task_struct *task,
+ unsigned long orig_flags, unsigned long flags)
+{
+ task->flags &= ~flags;
+ task->flags |= orig_flags & flags;
+}
+
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask);
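tsk_restore_flags() restores only the bits selected by the flags mask from orig_flags and leaves every other bit of task->flags untouched. A typical save/modify/restore pattern looks like the sketch below (the PF_MEMALLOC round trip is illustrative of how such a helper is used, not part of this patch):

	unsigned long pflags = current->flags;

	/* Temporarily allow this task to dip into memory reserves. */
	current->flags |= PF_MEMALLOC;

	/* ... work that must not recurse into memory reclaim ... */

	/* Restore PF_MEMALLOC to its saved state; other flags are untouched. */
	tsk_restore_flags(current, pflags, PF_MEMALLOC);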
@@ -2721,7 +2745,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);