author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-06-12 12:55:18 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-06-12 12:55:18 -0700
commit | 1b3cba8e60c67c968d108ac55c77e32c1928dec3 |
tree | 79ce35debeb7e203558d884efd03c7cf160cceaa |
parent | 14a73f54798f39854e521fb596da7d50b7566bbd |
parent | 7a232e0350940d2664f4de5cc3f0f443bae5062d |
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: 64-bit: fix arithmetics overflow
sched: fair group: fix overflow(was: fix divide by zero)
sched: fix TASK_WAKEKILL vs SIGKILL race
-rw-r--r-- | include/linux/sched.h | 13
-rw-r--r-- | kernel/sched.c | 22
2 files changed, 27 insertions, 8 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ae0be3c62375..c5d3f847ca8d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2026,6 +2026,19 @@ static inline int fatal_signal_pending(struct task_struct *p)
         return signal_pending(p) && __fatal_signal_pending(p);
 }
 
+static inline int signal_pending_state(long state, struct task_struct *p)
+{
+        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
+                return 0;
+        if (!signal_pending(p))
+                return 0;
+
+        if (state & (__TASK_STOPPED | __TASK_TRACED))
+                return 0;
+
+        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+}
+
 static inline int need_resched(void)
 {
         return unlikely(test_thread_flag(TIF_NEED_RESCHED));
diff --git a/kernel/sched.c b/kernel/sched.c
index bfb8ad8ed171..eaf6751e7612 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -312,12 +312,15 @@ static DEFINE_SPINLOCK(task_group_lock);
 #endif
 
 /*
- * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
+ * A weight of 0 or 1 can cause arithmetics problems.
+ * A weight of a cfs_rq is the sum of weights of which entities
+ * are queued on this cfs_rq, so a weight of a entity should not be
+ * too large, so as the shares value of a task group.
  * (The default weight is 1024 - so there's no practical
  * limitation from this.)
  */
 #define MIN_SHARES 2
-#define MAX_SHARES (ULONG_MAX - 1)
+#define MAX_SHARES (1UL << 18)
 
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 #endif
@@ -1337,8 +1340,13 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 {
         u64 tmp;
 
-        if (!lw->inv_weight)
-                lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)/(lw->weight+1);
+        if (!lw->inv_weight) {
+                if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
+                        lw->inv_weight = 1;
+                else
+                        lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
+                                / (lw->weight+1);
+        }
 
         tmp = (u64)delta_exec * weight;
         /*
@@ -4159,12 +4167,10 @@ need_resched_nonpreemptible:
         clear_tsk_need_resched(prev);
 
         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-                if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-                                signal_pending(prev))) {
+                if (unlikely(signal_pending_state(prev->state, prev)))
                         prev->state = TASK_RUNNING;
-                } else {
+                else
                         deactivate_task(rq, prev, 1);
-                }
                 switch_count = &prev->nvcsw;
         }
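For the fair-group overflow, the patch clamps MAX_SHARES and guards the inv_weight computation on 64-bit. The sketch below is a standalone userspace illustration, not kernel code, and assumes a 64-bit userland and that WMULT_CONST is (1UL << 32), the value 64-bit kernels of this era used; it compares the pre-patch formula, which wraps for very large weights, with the patched clamp to 1.

```c
/*
 * Standalone illustration of the calc_delta_mine() change; not kernel code.
 * Assumes a 64-bit userland and WMULT_CONST == (1UL << 32).
 */
#include <stdio.h>

#define WMULT_CONST (1UL << 32)

/* Pre-patch formula: the unsigned subtraction wraps once weight/2 exceeds WMULT_CONST. */
static unsigned long inv_weight_old(unsigned long weight)
{
        return 1 + (WMULT_CONST - weight / 2) / (weight + 1);
}

/* Patched formula: on 64-bit, weights at or above WMULT_CONST are clamped to 1. */
static unsigned long inv_weight_new(unsigned long weight)
{
        if (sizeof(unsigned long) * 8 > 32 && weight >= WMULT_CONST)
                return 1;
        return 1 + (WMULT_CONST - weight / 2) / (weight + 1);
}

int main(void)
{
        /* A group weight this large was reachable before MAX_SHARES was
         * capped at (1UL << 18); it is only a demonstration value here. */
        unsigned long weight = 1UL << 40;

        printf("old: inv_weight = %lu (wrapped, far too large)\n", inv_weight_old(weight));
        printf("new: inv_weight = %lu\n", inv_weight_new(weight));
        return 0;
}
```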
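The TASK_WAKEKILL vs SIGKILL fix moves the wakeup test in schedule() into the new signal_pending_state() helper. The sketch below is a userspace rendering of that decision table, not the kernel implementation: the state bit values are assumed to mirror include/linux/sched.h of this period, and signal_pending()/__fatal_signal_pending() are stubbed with plain flags so the three interesting cases can be printed.

```c
/*
 * Userspace sketch of the signal_pending_state() logic added by this merge.
 * State bit values and the two signal predicates are stand-ins, not the
 * real kernel definitions.
 */
#include <stdio.h>

#define TASK_INTERRUPTIBLE      0x0001
#define TASK_UNINTERRUPTIBLE    0x0002
#define __TASK_STOPPED          0x0004
#define __TASK_TRACED           0x0008
#define TASK_WAKEKILL           0x0080
#define TASK_KILLABLE           (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)

struct task_struct {
        int has_pending_signal; /* stub for signal_pending() */
        int has_fatal_signal;   /* stub for __fatal_signal_pending() */
};

static int signal_pending(struct task_struct *p)         { return p->has_pending_signal; }
static int __fatal_signal_pending(struct task_struct *p) { return p->has_fatal_signal; }

/* Same shape as the helper added to include/linux/sched.h. */
static int signal_pending_state(long state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        if (state & (__TASK_STOPPED | __TASK_TRACED))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

int main(void)
{
        struct task_struct nonfatal = { .has_pending_signal = 1, .has_fatal_signal = 0 };
        struct task_struct fatal    = { .has_pending_signal = 1, .has_fatal_signal = 1 };

        /* Interruptible sleep is woken by any pending signal. */
        printf("INTERRUPTIBLE, non-fatal signal: %d\n",
               signal_pending_state(TASK_INTERRUPTIBLE, &nonfatal));
        /* Killable (WAKEKILL) sleep ignores non-fatal signals... */
        printf("KILLABLE,      non-fatal signal: %d\n",
               signal_pending_state(TASK_KILLABLE, &nonfatal));
        /* ...but is woken by a fatal signal such as SIGKILL. */
        printf("KILLABLE,      fatal signal:     %d\n",
               signal_pending_state(TASK_KILLABLE, &fatal));
        return 0;
}
```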