Diffstat (limited to 'kernel')
 kernel/exit.c             |   9
 kernel/kprobes.c          | 565
 kernel/posix-cpu-timers.c |  12
 kernel/power/hibernate.c  |  22
 kernel/power/suspend.c    |   5
 kernel/power/swap.c       |  53
 kernel/power/user.c       |   2
 kernel/sched_fair.c       |   8
 8 files changed, 456 insertions(+), 220 deletions(-)
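The first hunk below (kernel/exit.c) resets the thread's address limit at the top of do_exit(): if the task oopsed while get_fs() was KERNEL_DS, the later mm_release()->clear_child_tid() write would go through uaccess with kernel addresses allowed. A minimal user-space model of that failure mode and of the fix; set_fs(), get_fs() and the boundary check here are simplified stand-ins, not the kernel implementation:

#include <assert.h>
#include <stdio.h>

typedef enum { USER_DS, KERNEL_DS } mm_segment_t;

static mm_segment_t addr_limit = USER_DS;

static void set_fs(mm_segment_t seg) { addr_limit = seg; }
static mm_segment_t get_fs(void) { return addr_limit; }

/* Models put_user(): with USER_DS, writes above the user/kernel
 * boundary are rejected with -EFAULT; with KERNEL_DS they would go
 * through, which is what the exit.c hunk is defending against. */
static int put_user_model(unsigned long addr)
{
	const unsigned long TASK_SIZE_MODEL = 0x0000800000000000UL;

	if (get_fs() == USER_DS && addr >= TASK_SIZE_MODEL)
		return -14; /* -EFAULT */
	return 0;
}

static void do_exit_model(void)
{
	set_fs(USER_DS); /* the fix: always drop back to USER_DS */

	/* mm_release()->clear_child_tid() style write to a pointer the
	 * (possibly hostile) user set; must not reach kernel space. */
	assert(put_user_model(0xffffffff81000000UL) == -14);
}

int main(void)
{
	set_fs(KERNEL_DS);	/* oops arrived inside a KERNEL_DS section */
	do_exit_model();
	puts("exit-path write confined to user addresses");
	return 0;
}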
diff --git a/kernel/exit.c b/kernel/exit.c index 21aa7b3001fb..676149a4ac5f 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -914,6 +914,15 @@ NORET_TYPE void do_exit(long code) if (unlikely(!tsk->pid)) panic("Attempted to kill the idle task!"); + /* + * If do_exit is called because this processes oopsed, it's possible + * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before + * continuing. Amongst other possible reasons, this is to prevent + * mm_release()->clear_child_tid() from writing to a user-controlled + * kernel address. + */ + set_fs(USER_DS); + tracehook_report_exit(&code); validate_creds_for_do_exit(tsk); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 9737a76e106f..7663e5df0e6f 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -354,13 +354,20 @@ static inline int kprobe_aggrprobe(struct kprobe *p) return p->pre_handler == aggr_pre_handler; } +/* Return true(!0) if the kprobe is unused */ +static inline int kprobe_unused(struct kprobe *p) +{ + return kprobe_aggrprobe(p) && kprobe_disabled(p) && + list_empty(&p->list); +} + /* * Keep all fields in the kprobe consistent */ -static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) +static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) { - memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t)); - memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn)); + memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); + memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); } #ifdef CONFIG_OPTPROBES @@ -384,6 +391,17 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs) } } +/* Free optimized instructions and optimized_kprobe */ +static __kprobes void free_aggr_kprobe(struct kprobe *p) +{ + struct optimized_kprobe *op; + + op = container_of(p, struct optimized_kprobe, kp); + arch_remove_optimized_kprobe(op); + arch_remove_kprobe(p); + kfree(op); +} + /* Return true(!0) if the kprobe is ready for optimization. */ static inline int kprobe_optready(struct kprobe *p) { @@ -397,6 +415,33 @@ static inline int kprobe_optready(struct kprobe *p) return 0; } +/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ +static inline int kprobe_disarmed(struct kprobe *p) +{ + struct optimized_kprobe *op; + + /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ + if (!kprobe_aggrprobe(p)) + return kprobe_disabled(p); + + op = container_of(p, struct optimized_kprobe, kp); + + return kprobe_disabled(p) && list_empty(&op->list); +} + +/* Return true(!0) if the probe is queued on (un)optimizing lists */ +static int __kprobes kprobe_queued(struct kprobe *p) +{ + struct optimized_kprobe *op; + + if (kprobe_aggrprobe(p)) { + op = container_of(p, struct optimized_kprobe, kp); + if (!list_empty(&op->list)) + return 1; + } + return 0; +} + /* * Return an optimized kprobe whose optimizing code replaces * instructions including addr (exclude breakpoint). 
 */
@@ -422,30 +467,23 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) /* Optimization staging list, protected by kprobe_mutex */ static LIST_HEAD(optimizing_list); +static LIST_HEAD(unoptimizing_list); static void kprobe_optimizer(struct work_struct *work); static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); +static DECLARE_COMPLETION(optimizer_comp); #define OPTIMIZE_DELAY 5 -/* Kprobe jump optimizer */ -static __kprobes void kprobe_optimizer(struct work_struct *work) +/* + * Optimize (replace a breakpoint with a jump) kprobes listed on + * optimizing_list. + */ +static __kprobes void do_optimize_kprobes(void) { - struct optimized_kprobe *op, *tmp; - - /* Lock modules while optimizing kprobes */ - mutex_lock(&module_mutex); - mutex_lock(&kprobe_mutex); - if (kprobes_all_disarmed || !kprobes_allow_optimization) - goto end; - - /* - * Wait for quiesence period to ensure all running interrupts - * are done. Because optprobe may modify multiple instructions - * there is a chance that Nth instruction is interrupted. In that - * case, running interrupt can return to 2nd-Nth byte of jump - * instruction. This wait is for avoiding it. - */ - synchronize_sched(); + /* Optimization never be done when disarmed */ + if (kprobes_all_disarmed || !kprobes_allow_optimization || + list_empty(&optimizing_list)) + return; /* * The optimization/unoptimization refers online_cpus via @@ -459,17 +497,111 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) */ get_online_cpus(); mutex_lock(&text_mutex); - list_for_each_entry_safe(op, tmp, &optimizing_list, list) { - WARN_ON(kprobe_disabled(&op->kp)); - if (arch_optimize_kprobe(op) < 0) - op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; - list_del_init(&op->list); + arch_optimize_kprobes(&optimizing_list); + mutex_unlock(&text_mutex); + put_online_cpus(); +} + +/* + * Unoptimize (replace a jump with a breakpoint and remove the breakpoint + * if need) kprobes listed on unoptimizing_list. + */ +static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) +{ + struct optimized_kprobe *op, *tmp; + + /* Unoptimization must be done anytime */ + if (list_empty(&unoptimizing_list)) + return; + + /* Ditto to do_optimize_kprobes */ + get_online_cpus(); + mutex_lock(&text_mutex); + arch_unoptimize_kprobes(&unoptimizing_list, free_list); + /* Loop free_list for disarming */ + list_for_each_entry_safe(op, tmp, free_list, list) { + /* Disarm probes if marked disabled */ + if (kprobe_disabled(&op->kp)) + arch_disarm_kprobe(&op->kp); + if (kprobe_unused(&op->kp)) { + /* + * Remove unused probes from hash list. After waiting + * for synchronization, these probes are reclaimed. + * (reclaiming is done by do_free_cleaned_kprobes.) 
+ */ + hlist_del_rcu(&op->kp.hlist); + } else + list_del_init(&op->list); } mutex_unlock(&text_mutex); put_online_cpus(); -end: +} + +/* Reclaim all kprobes on the free_list */ +static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) +{ + struct optimized_kprobe *op, *tmp; + + list_for_each_entry_safe(op, tmp, free_list, list) { + BUG_ON(!kprobe_unused(&op->kp)); + list_del_init(&op->list); + free_aggr_kprobe(&op->kp); + } +} + +/* Start optimizer after OPTIMIZE_DELAY passed */ +static __kprobes void kick_kprobe_optimizer(void) +{ + if (!delayed_work_pending(&optimizing_work)) + schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); +} + +/* Kprobe jump optimizer */ +static __kprobes void kprobe_optimizer(struct work_struct *work) +{ + LIST_HEAD(free_list); + + /* Lock modules while optimizing kprobes */ + mutex_lock(&module_mutex); + mutex_lock(&kprobe_mutex); + + /* + * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) + * kprobes before waiting for quiesence period. + */ + do_unoptimize_kprobes(&free_list); + + /* + * Step 2: Wait for quiesence period to ensure all running interrupts + * are done. Because optprobe may modify multiple instructions + * there is a chance that Nth instruction is interrupted. In that + * case, running interrupt can return to 2nd-Nth byte of jump + * instruction. This wait is for avoiding it. + */ + synchronize_sched(); + + /* Step 3: Optimize kprobes after quiesence period */ + do_optimize_kprobes(); + + /* Step 4: Free cleaned kprobes after quiesence period */ + do_free_cleaned_kprobes(&free_list); + mutex_unlock(&kprobe_mutex); mutex_unlock(&module_mutex); + + /* Step 5: Kick optimizer again if needed */ + if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) + kick_kprobe_optimizer(); + else + /* Wake up all waiters */ + complete_all(&optimizer_comp); +} + +/* Wait for completing optimization and unoptimization */ +static __kprobes void wait_for_kprobe_optimizer(void) +{ + if (delayed_work_pending(&optimizing_work)) + wait_for_completion(&optimizer_comp); } /* Optimize kprobe if p is ready to be optimized */ @@ -495,42 +627,99 @@ static __kprobes void optimize_kprobe(struct kprobe *p) /* Check if it is already optimized. */ if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) return; - op->kp.flags |= KPROBE_FLAG_OPTIMIZED; - list_add(&op->list, &optimizing_list); - if (!delayed_work_pending(&optimizing_work)) - schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); + + if (!list_empty(&op->list)) + /* This is under unoptimizing. 
Just dequeue the probe */ + list_del_init(&op->list); + else { + list_add(&op->list, &optimizing_list); + kick_kprobe_optimizer(); + } +} + +/* Short cut to direct unoptimizing */ +static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op) +{ + get_online_cpus(); + arch_unoptimize_kprobe(op); + put_online_cpus(); + if (kprobe_disabled(&op->kp)) + arch_disarm_kprobe(&op->kp); } /* Unoptimize a kprobe if p is optimized */ -static __kprobes void unoptimize_kprobe(struct kprobe *p) +static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force) { struct optimized_kprobe *op; - if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) { - op = container_of(p, struct optimized_kprobe, kp); - if (!list_empty(&op->list)) - /* Dequeue from the optimization queue */ + if (!kprobe_aggrprobe(p) || kprobe_disarmed(p)) + return; /* This is not an optprobe nor optimized */ + + op = container_of(p, struct optimized_kprobe, kp); + if (!kprobe_optimized(p)) { + /* Unoptimized or unoptimizing case */ + if (force && !list_empty(&op->list)) { + /* + * Only if this is unoptimizing kprobe and forced, + * forcibly unoptimize it. (No need to unoptimize + * unoptimized kprobe again :) + */ list_del_init(&op->list); - else - /* Replace jump with break */ - arch_unoptimize_kprobe(op); - op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; + force_unoptimize_kprobe(op); + } + return; + } + + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; + if (!list_empty(&op->list)) { + /* Dequeue from the optimization queue */ + list_del_init(&op->list); + return; + } + /* Optimized kprobe case */ + if (force) + /* Forcibly update the code: this is a special case */ + force_unoptimize_kprobe(op); + else { + list_add(&op->list, &unoptimizing_list); + kick_kprobe_optimizer(); } } +/* Cancel unoptimizing for reusing */ +static void reuse_unused_kprobe(struct kprobe *ap) +{ + struct optimized_kprobe *op; + + BUG_ON(!kprobe_unused(ap)); + /* + * Unused kprobe MUST be on the way of delayed unoptimizing (means + * there is still a relative jump) and disabled. + */ + op = container_of(ap, struct optimized_kprobe, kp); + if (unlikely(list_empty(&op->list))) + printk(KERN_WARNING "Warning: found a stray unused " + "aggrprobe@%p\n", ap->addr); + /* Enable the probe again */ + ap->flags &= ~KPROBE_FLAG_DISABLED; + /* Optimize it again (remove from op->list) */ + BUG_ON(!kprobe_optready(ap)); + optimize_kprobe(ap); +} + /* Remove optimized instructions */ static void __kprobes kill_optimized_kprobe(struct kprobe *p) { struct optimized_kprobe *op; op = container_of(p, struct optimized_kprobe, kp); - if (!list_empty(&op->list)) { - /* Dequeue from the optimization queue */ + if (!list_empty(&op->list)) + /* Dequeue from the (un)optimization queue */ list_del_init(&op->list); - op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; - } - /* Don't unoptimize, because the target code will be freed. */ + + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; + /* Don't touch the code, because it is already freed. 
*/ arch_remove_optimized_kprobe(op); } @@ -543,16 +732,6 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p) arch_prepare_optimized_kprobe(op); } -/* Free optimized instructions and optimized_kprobe */ -static __kprobes void free_aggr_kprobe(struct kprobe *p) -{ - struct optimized_kprobe *op; - - op = container_of(p, struct optimized_kprobe, kp); - arch_remove_optimized_kprobe(op); - kfree(op); -} - /* Allocate new optimized_kprobe and try to prepare optimized instructions */ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) { @@ -587,7 +766,8 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) op = container_of(ap, struct optimized_kprobe, kp); if (!arch_prepared_optinsn(&op->optinsn)) { /* If failed to setup optimizing, fallback to kprobe */ - free_aggr_kprobe(ap); + arch_remove_optimized_kprobe(op); + kfree(op); return; } @@ -631,21 +811,16 @@ static void __kprobes unoptimize_all_kprobes(void) return; kprobes_allow_optimization = false; - printk(KERN_INFO "Kprobes globally unoptimized\n"); - get_online_cpus(); /* For avoiding text_mutex deadlock */ - mutex_lock(&text_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) { if (!kprobe_disabled(p)) - unoptimize_kprobe(p); + unoptimize_kprobe(p, false); } } - - mutex_unlock(&text_mutex); - put_online_cpus(); - /* Allow all currently running kprobes to complete */ - synchronize_sched(); + /* Wait for unoptimizing completion */ + wait_for_kprobe_optimizer(); + printk(KERN_INFO "Kprobes globally unoptimized\n"); } int sysctl_kprobes_optimization; @@ -669,44 +844,60 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, } #endif /* CONFIG_SYSCTL */ +/* Put a breakpoint for a probe. Must be called with text_mutex locked */ static void __kprobes __arm_kprobe(struct kprobe *p) { - struct kprobe *old_p; + struct kprobe *_p; /* Check collision with other optimized kprobes */ - old_p = get_optimized_kprobe((unsigned long)p->addr); - if (unlikely(old_p)) - unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */ + _p = get_optimized_kprobe((unsigned long)p->addr); + if (unlikely(_p)) + /* Fallback to unoptimized kprobe */ + unoptimize_kprobe(_p, true); arch_arm_kprobe(p); optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ } -static void __kprobes __disarm_kprobe(struct kprobe *p) +/* Remove the breakpoint of a probe. Must be called with text_mutex locked */ +static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt) { - struct kprobe *old_p; + struct kprobe *_p; - unoptimize_kprobe(p); /* Try to unoptimize */ - arch_disarm_kprobe(p); + unoptimize_kprobe(p, false); /* Try to unoptimize */ - /* If another kprobe was blocked, optimize it. */ - old_p = get_optimized_kprobe((unsigned long)p->addr); - if (unlikely(old_p)) - optimize_kprobe(old_p); + if (!kprobe_queued(p)) { + arch_disarm_kprobe(p); + /* If another kprobe was blocked, optimize it. 
*/ + _p = get_optimized_kprobe((unsigned long)p->addr); + if (unlikely(_p) && reopt) + optimize_kprobe(_p); + } + /* TODO: reoptimize others after unoptimized this probe */ } #else /* !CONFIG_OPTPROBES */ #define optimize_kprobe(p) do {} while (0) -#define unoptimize_kprobe(p) do {} while (0) +#define unoptimize_kprobe(p, f) do {} while (0) #define kill_optimized_kprobe(p) do {} while (0) #define prepare_optimized_kprobe(p) do {} while (0) #define try_to_optimize_kprobe(p) do {} while (0) #define __arm_kprobe(p) arch_arm_kprobe(p) -#define __disarm_kprobe(p) arch_disarm_kprobe(p) +#define __disarm_kprobe(p, o) arch_disarm_kprobe(p) +#define kprobe_disarmed(p) kprobe_disabled(p) +#define wait_for_kprobe_optimizer() do {} while (0) + +/* There should be no unused kprobes can be reused without optimization */ +static void reuse_unused_kprobe(struct kprobe *ap) +{ + printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); + BUG_ON(kprobe_unused(ap)); +} static __kprobes void free_aggr_kprobe(struct kprobe *p) { + arch_remove_kprobe(p); kfree(p); } @@ -732,11 +923,10 @@ static void __kprobes arm_kprobe(struct kprobe *kp) /* Disarm a kprobe with text_mutex */ static void __kprobes disarm_kprobe(struct kprobe *kp) { - get_online_cpus(); /* For avoiding text_mutex deadlock */ + /* Ditto */ mutex_lock(&text_mutex); - __disarm_kprobe(kp); + __disarm_kprobe(kp, true); mutex_unlock(&text_mutex); - put_online_cpus(); } /* @@ -942,7 +1132,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); if (p->break_handler || p->post_handler) - unoptimize_kprobe(ap); /* Fall back to normal kprobe */ + unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ if (p->break_handler) { if (ap->break_handler) @@ -993,19 +1183,21 @@ static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) * This is the second or subsequent kprobe at the address - handle * the intricacies */ -static int __kprobes register_aggr_kprobe(struct kprobe *old_p, +static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) { int ret = 0; - struct kprobe *ap = old_p; + struct kprobe *ap = orig_p; - if (!kprobe_aggrprobe(old_p)) { - /* If old_p is not an aggr_kprobe, create new aggr_kprobe. */ - ap = alloc_aggr_kprobe(old_p); + if (!kprobe_aggrprobe(orig_p)) { + /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ + ap = alloc_aggr_kprobe(orig_p); if (!ap) return -ENOMEM; - init_aggr_kprobe(ap, old_p); - } + init_aggr_kprobe(ap, orig_p); + } else if (kprobe_unused(ap)) + /* This probe is going to die. Rescue it */ + reuse_unused_kprobe(ap); if (kprobe_gone(ap)) { /* @@ -1039,23 +1231,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, return add_new_kprobe(ap, p); } -/* Try to disable aggr_kprobe, and return 1 if succeeded.*/ -static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p) -{ - struct kprobe *kp; - - list_for_each_entry_rcu(kp, &p->list, list) { - if (!kprobe_disabled(kp)) - /* - * There is an active probe on the list. - * We can't disable aggr_kprobe. - */ - return 0; - } - p->flags |= KPROBE_FLAG_DISABLED; - return 1; -} - static int __kprobes in_kprobes_functions(unsigned long addr) { struct kprobe_blackpoint *kb; @@ -1098,34 +1273,33 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) /* Check passed kprobe is valid and return kprobe in kprobe_table. 
*/ static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) { - struct kprobe *old_p, *list_p; + struct kprobe *ap, *list_p; - old_p = get_kprobe(p->addr); - if (unlikely(!old_p)) + ap = get_kprobe(p->addr); + if (unlikely(!ap)) return NULL; - if (p != old_p) { - list_for_each_entry_rcu(list_p, &old_p->list, list) + if (p != ap) { + list_for_each_entry_rcu(list_p, &ap->list, list) if (list_p == p) /* kprobe p is a valid probe */ goto valid; return NULL; } valid: - return old_p; + return ap; } /* Return error if the kprobe is being re-registered */ static inline int check_kprobe_rereg(struct kprobe *p) { int ret = 0; - struct kprobe *old_p; mutex_lock(&kprobe_mutex); - old_p = __get_valid_kprobe(p); - if (old_p) + if (__get_valid_kprobe(p)) ret = -EINVAL; mutex_unlock(&kprobe_mutex); + return ret; } @@ -1229,67 +1403,121 @@ fail_with_jump_label: } EXPORT_SYMBOL_GPL(register_kprobe); +/* Check if all probes on the aggrprobe are disabled */ +static int __kprobes aggr_kprobe_disabled(struct kprobe *ap) +{ + struct kprobe *kp; + + list_for_each_entry_rcu(kp, &ap->list, list) + if (!kprobe_disabled(kp)) + /* + * There is an active probe on the list. + * We can't disable this ap. + */ + return 0; + + return 1; +} + +/* Disable one kprobe: Make sure called under kprobe_mutex is locked */ +static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) +{ + struct kprobe *orig_p; + + /* Get an original kprobe for return */ + orig_p = __get_valid_kprobe(p); + if (unlikely(orig_p == NULL)) + return NULL; + + if (!kprobe_disabled(p)) { + /* Disable probe if it is a child probe */ + if (p != orig_p) + p->flags |= KPROBE_FLAG_DISABLED; + + /* Try to disarm and disable this/parent probe */ + if (p == orig_p || aggr_kprobe_disabled(orig_p)) { + disarm_kprobe(orig_p); + orig_p->flags |= KPROBE_FLAG_DISABLED; + } + } + + return orig_p; +} + /* * Unregister a kprobe without a scheduler synchronization. */ static int __kprobes __unregister_kprobe_top(struct kprobe *p) { - struct kprobe *old_p, *list_p; + struct kprobe *ap, *list_p; - old_p = __get_valid_kprobe(p); - if (old_p == NULL) + /* Disable kprobe. This will disarm it if needed. */ + ap = __disable_kprobe(p); + if (ap == NULL) return -EINVAL; - if (old_p == p || - (kprobe_aggrprobe(old_p) && - list_is_singular(&old_p->list))) { + if (ap == p) /* - * Only probe on the hash list. Disarm only if kprobes are - * enabled and not gone - otherwise, the breakpoint would - * already have been removed. We save on flushing icache. + * This probe is an independent(and non-optimized) kprobe + * (not an aggrprobe). Remove from the hash list. */ - if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) - disarm_kprobe(old_p); - hlist_del_rcu(&old_p->hlist); - } else { + goto disarmed; + + /* Following process expects this probe is an aggrprobe */ + WARN_ON(!kprobe_aggrprobe(ap)); + + if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) + /* + * !disarmed could be happen if the probe is under delayed + * unoptimizing. 
+ */ + goto disarmed; + else { + /* If disabling probe has special handlers, update aggrprobe */ if (p->break_handler && !kprobe_gone(p)) - old_p->break_handler = NULL; + ap->break_handler = NULL; if (p->post_handler && !kprobe_gone(p)) { - list_for_each_entry_rcu(list_p, &old_p->list, list) { + list_for_each_entry_rcu(list_p, &ap->list, list) { if ((list_p != p) && (list_p->post_handler)) goto noclean; } - old_p->post_handler = NULL; + ap->post_handler = NULL; } noclean: + /* + * Remove from the aggrprobe: this path will do nothing in + * __unregister_kprobe_bottom(). + */ list_del_rcu(&p->list); - if (!kprobe_disabled(old_p)) { - try_to_disable_aggr_kprobe(old_p); - if (!kprobes_all_disarmed) { - if (kprobe_disabled(old_p)) - disarm_kprobe(old_p); - else - /* Try to optimize this probe again */ - optimize_kprobe(old_p); - } - } + if (!kprobe_disabled(ap) && !kprobes_all_disarmed) + /* + * Try to optimize this probe again, because post + * handler may have been changed. + */ + optimize_kprobe(ap); } return 0; + +disarmed: + BUG_ON(!kprobe_disarmed(ap)); + hlist_del_rcu(&ap->hlist); + return 0; } static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) { - struct kprobe *old_p; + struct kprobe *ap; if (list_empty(&p->list)) + /* This is an independent kprobe */ arch_remove_kprobe(p); else if (list_is_singular(&p->list)) { - /* "p" is the last child of an aggr_kprobe */ - old_p = list_entry(p->list.next, struct kprobe, list); + /* This is the last child of an aggrprobe */ + ap = list_entry(p->list.next, struct kprobe, list); list_del(&p->list); - arch_remove_kprobe(old_p); - free_aggr_kprobe(old_p); + free_aggr_kprobe(ap); } + /* Otherwise, do nothing. */ } int __kprobes register_kprobes(struct kprobe **kps, int num) @@ -1607,29 +1835,13 @@ static void __kprobes kill_kprobe(struct kprobe *p) int __kprobes disable_kprobe(struct kprobe *kp) { int ret = 0; - struct kprobe *p; mutex_lock(&kprobe_mutex); - /* Check whether specified probe is valid. */ - p = __get_valid_kprobe(kp); - if (unlikely(p == NULL)) { + /* Disable this kprobe */ + if (__disable_kprobe(kp) == NULL) ret = -EINVAL; - goto out; - } - /* If the probe is already disabled (or gone), just return */ - if (kprobe_disabled(kp)) - goto out; - - kp->flags |= KPROBE_FLAG_DISABLED; - if (p != kp) - /* When kp != p, p is always enabled. */ - try_to_disable_aggr_kprobe(p); - - if (!kprobes_all_disarmed && kprobe_disabled(p)) - disarm_kprobe(p); -out: mutex_unlock(&kprobe_mutex); return ret; } @@ -1927,36 +2139,27 @@ static void __kprobes disarm_all_kprobes(void) mutex_lock(&kprobe_mutex); /* If kprobes are already disarmed, just return */ - if (kprobes_all_disarmed) - goto already_disabled; + if (kprobes_all_disarmed) { + mutex_unlock(&kprobe_mutex); + return; + } kprobes_all_disarmed = true; printk(KERN_INFO "Kprobes globally disabled\n"); - /* - * Here we call get_online_cpus() for avoiding text_mutex deadlock, - * because disarming may also unoptimize kprobes. 
- */ - get_online_cpus(); mutex_lock(&text_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) { if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) - __disarm_kprobe(p); + __disarm_kprobe(p, false); } } - mutex_unlock(&text_mutex); - put_online_cpus(); mutex_unlock(&kprobe_mutex); - /* Allow all currently running kprobes to complete */ - synchronize_sched(); - return; -already_disabled: - mutex_unlock(&kprobe_mutex); - return; + /* Wait for disarming all kprobes by optimizer */ + wait_for_kprobe_optimizer(); } /* diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 6842eeba5879..05bb7173850e 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -37,13 +37,13 @@ static int check_clock(const clockid_t which_clock) if (pid == 0) return 0; - read_lock(&tasklist_lock); + rcu_read_lock(); p = find_task_by_vpid(pid); if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ? - same_thread_group(p, current) : thread_group_leader(p))) { + same_thread_group(p, current) : has_group_leader_pid(p))) { error = -EINVAL; } - read_unlock(&tasklist_lock); + rcu_read_unlock(); return error; } @@ -390,7 +390,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) INIT_LIST_HEAD(&new_timer->it.cpu.entry); - read_lock(&tasklist_lock); + rcu_read_lock(); if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) { if (pid == 0) { p = current; @@ -404,7 +404,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) p = current->group_leader; } else { p = find_task_by_vpid(pid); - if (p && !thread_group_leader(p)) + if (p && !has_group_leader_pid(p)) p = NULL; } } @@ -414,7 +414,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) } else { ret = -EINVAL; } - read_unlock(&tasklist_lock); + rcu_read_unlock(); return ret; } diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 657272e91d0a..048d0b514831 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -327,7 +327,6 @@ static int create_image(int platform_mode) int hibernation_snapshot(int platform_mode) { int error; - gfp_t saved_mask; error = platform_begin(platform_mode); if (error) @@ -339,7 +338,7 @@ int hibernation_snapshot(int platform_mode) goto Close; suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); + pm_restrict_gfp_mask(); error = dpm_suspend_start(PMSG_FREEZE); if (error) goto Recover_platform; @@ -348,7 +347,10 @@ int hibernation_snapshot(int platform_mode) goto Recover_platform; error = create_image(platform_mode); - /* Control returns here after successful restore */ + /* + * Control returns here (1) after the image has been created or the + * image creation has failed and (2) after a successful restore. + */ Resume_devices: /* We may need to release the preallocated image pages here. */ @@ -357,7 +359,10 @@ int hibernation_snapshot(int platform_mode) dpm_resume_end(in_suspend ? (error ? 
PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); - set_gfp_allowed_mask(saved_mask); + + if (error || !in_suspend) + pm_restore_gfp_mask(); + resume_console(); Close: platform_end(platform_mode); @@ -452,17 +457,16 @@ static int resume_target_kernel(bool platform_mode) int hibernation_restore(int platform_mode) { int error; - gfp_t saved_mask; pm_prepare_console(); suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); + pm_restrict_gfp_mask(); error = dpm_suspend_start(PMSG_QUIESCE); if (!error) { error = resume_target_kernel(platform_mode); dpm_resume_end(PMSG_RECOVER); } - set_gfp_allowed_mask(saved_mask); + pm_restore_gfp_mask(); resume_console(); pm_restore_console(); return error; @@ -476,7 +480,6 @@ int hibernation_restore(int platform_mode) int hibernation_platform_enter(void) { int error; - gfp_t saved_mask; if (!hibernation_ops) return -ENOSYS; @@ -492,7 +495,6 @@ int hibernation_platform_enter(void) entering_platform_hibernation = true; suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); error = dpm_suspend_start(PMSG_HIBERNATE); if (error) { if (hibernation_ops->recover) @@ -536,7 +538,6 @@ int hibernation_platform_enter(void) Resume_devices: entering_platform_hibernation = false; dpm_resume_end(PMSG_RESTORE); - set_gfp_allowed_mask(saved_mask); resume_console(); Close: @@ -646,6 +647,7 @@ int hibernate(void) swsusp_free(); if (!error) power_down(); + pm_restore_gfp_mask(); } else { pr_debug("PM: Image restored successfully.\n"); } diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 7335952ee473..ecf770509d0d 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -197,7 +197,6 @@ static int suspend_enter(suspend_state_t state) int suspend_devices_and_enter(suspend_state_t state) { int error; - gfp_t saved_mask; if (!suspend_ops) return -ENOSYS; @@ -208,7 +207,7 @@ int suspend_devices_and_enter(suspend_state_t state) goto Close; } suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); + pm_restrict_gfp_mask(); suspend_test_start(); error = dpm_suspend_start(PMSG_SUSPEND); if (error) { @@ -225,7 +224,7 @@ int suspend_devices_and_enter(suspend_state_t state) suspend_test_start(); dpm_resume_end(PMSG_RESUME); suspend_test_finish("resume devices"); - set_gfp_allowed_mask(saved_mask); + pm_restore_gfp_mask(); resume_console(); Close: if (suspend_ops->end) diff --git a/kernel/power/swap.c b/kernel/power/swap.c index a0e4a86ccf94..baf667bb2794 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -6,6 +6,7 @@ * * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> + * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com> * * This file is released under the GPLv2. 
* @@ -753,30 +754,43 @@ static int load_image_lzo(struct swap_map_handle *handle, { unsigned int m; int error = 0; + struct bio *bio; struct timeval start; struct timeval stop; unsigned nr_pages; - size_t off, unc_len, cmp_len; - unsigned char *unc, *cmp, *page; + size_t i, off, unc_len, cmp_len; + unsigned char *unc, *cmp, *page[LZO_CMP_PAGES]; - page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); - if (!page) { - printk(KERN_ERR "PM: Failed to allocate LZO page\n"); - return -ENOMEM; + for (i = 0; i < LZO_CMP_PAGES; i++) { + page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); + if (!page[i]) { + printk(KERN_ERR "PM: Failed to allocate LZO page\n"); + + while (i) + free_page((unsigned long)page[--i]); + + return -ENOMEM; + } } unc = vmalloc(LZO_UNC_SIZE); if (!unc) { printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); - free_page((unsigned long)page); + + for (i = 0; i < LZO_CMP_PAGES; i++) + free_page((unsigned long)page[i]); + return -ENOMEM; } cmp = vmalloc(LZO_CMP_SIZE); if (!cmp) { printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); + vfree(unc); - free_page((unsigned long)page); + for (i = 0; i < LZO_CMP_PAGES; i++) + free_page((unsigned long)page[i]); + return -ENOMEM; } @@ -787,6 +801,7 @@ static int load_image_lzo(struct swap_map_handle *handle, if (!m) m = 1; nr_pages = 0; + bio = NULL; do_gettimeofday(&start); error = snapshot_write_next(snapshot); @@ -794,11 +809,11 @@ static int load_image_lzo(struct swap_map_handle *handle, goto out_finish; for (;;) { - error = swap_read_page(handle, page, NULL); /* sync */ + error = swap_read_page(handle, page[0], NULL); /* sync */ if (error) break; - cmp_len = *(size_t *)page; + cmp_len = *(size_t *)page[0]; if (unlikely(!cmp_len || cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { printk(KERN_ERR "PM: Invalid LZO compressed length\n"); @@ -806,13 +821,20 @@ static int load_image_lzo(struct swap_map_handle *handle, break; } - memcpy(cmp, page, PAGE_SIZE); - for (off = PAGE_SIZE; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { - error = swap_read_page(handle, page, NULL); /* sync */ + for (off = PAGE_SIZE, i = 1; + off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { + error = swap_read_page(handle, page[i], &bio); if (error) goto out_finish; + } - memcpy(cmp + off, page, PAGE_SIZE); + error = hib_wait_on_bio_chain(&bio); /* need all data now */ + if (error) + goto out_finish; + + for (off = 0, i = 0; + off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) { + memcpy(cmp + off, page[i], PAGE_SIZE); } unc_len = LZO_UNC_SIZE; @@ -857,7 +879,8 @@ out_finish: vfree(cmp); vfree(unc); - free_page((unsigned long)page); + for (i = 0; i < LZO_CMP_PAGES; i++) + free_page((unsigned long)page[i]); return error; } diff --git a/kernel/power/user.c b/kernel/power/user.c index e819e17877ca..1b2ea31e6bd8 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -263,6 +263,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, case SNAPSHOT_UNFREEZE: if (!data->frozen || data->ready) break; + pm_restore_gfp_mask(); thaw_processes(); usermodehelper_enable(); data->frozen = 0; @@ -275,6 +276,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, error = -EPERM; break; } + pm_restore_gfp_mask(); error = hibernation_snapshot(data->platform_support); if (!error) error = put_user(in_suspend, (int __user *)arg); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 52ab113d8bb9..00ebd7686676 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1758,10 +1758,6 @@ static void 
pull_task(struct rq *src_rq, struct task_struct *p,
 	set_task_cpu(p, this_cpu);
 	activate_task(this_rq, p, 0);
 	check_preempt_curr(this_rq, p, 0);
-
-	/* re-arm NEWIDLE balancing when moving tasks */
-	src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
-	this_rq->idle_stamp = 0;
 }
 
 /*
@@ -3219,8 +3215,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+		if (pulled_task) {
+			this_rq->idle_stamp = 0;
 			break;
+		}
 	}
 
 	raw_spin_lock(&this_rq->lock);
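The kprobes rework above is the bulk of the change: instead of calling synchronize_sched() for every probe, (un)optimization requests are queued on optimizing_list/unoptimizing_list and drained by one delayed worker, so a single quiescence wait covers the whole batch. A compressed user-space sketch of that five-step worker; the lists, quiesce() and the kick/pending flag are simplified stand-ins for the kernel's workqueue machinery:

#include <stdio.h>
#include <stdlib.h>

struct probe {
	int id;
	struct probe *next;
};

static struct probe *optimizing_list, *unoptimizing_list, *free_list;
static int work_pending;

static void push(struct probe **list, struct probe *p)
{
	p->next = *list;
	*list = p;
}

/* Models synchronize_sched(): the expensive wait that the rework
 * amortizes over a whole batch instead of paying once per probe. */
static void quiesce(void)
{
	puts("-- quiescence wait (once per batch) --");
}

/* Models schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY) */
static void kick_optimizer(void)
{
	work_pending = 1;
}

/* Models kprobe_optimizer(); the steps mirror the comments in the diff */
static void optimizer(void)
{
	struct probe *p;

	work_pending = 0;

	/* Step 1: unoptimize queued probes, collect them on free_list */
	while ((p = unoptimizing_list)) {
		unoptimizing_list = p->next;
		printf("unoptimize probe %d\n", p->id);
		push(&free_list, p);
	}

	/* Step 2: one wait makes every unoptimization in the batch safe */
	quiesce();

	/* Step 3: optimize queued probes after the quiescence period */
	while ((p = optimizing_list)) {
		optimizing_list = p->next;
		printf("optimize probe %d\n", p->id);
		/* stays armed; reclaimed only via the unoptimizing path */
	}

	/* Step 4: nothing can still run the stale slots; free them */
	while ((p = free_list)) {
		free_list = p->next;
		free(p);
	}

	/* Step 5: re-kick if more work was queued while we ran */
	if (optimizing_list || unoptimizing_list)
		kick_optimizer();
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct probe *p = malloc(sizeof(*p));
		p->id = i;
		push(i % 2 ? &unoptimizing_list : &optimizing_list, p);
		kick_optimizer();
	}
	while (work_pending)
		optimizer();
	return 0;
}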
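The kernel/power hunks replace the per-caller saved_mask juggling with a pm_restrict_gfp_mask()/pm_restore_gfp_mask() pair, which lets the restore run on a different path from the save (e.g. in hibernate() after power_down() returns, or from SNAPSHOT_UNFREEZE in user.c). A sketch of that ownership move; the two function names come from the diff, while the mask plumbing and the idempotent restore are assumptions of this model:

#include <stdio.h>

#define GFP_ALL  0xffu
#define GFP_IOFS 0x30u	/* stand-in for __GFP_IO | __GFP_FS */

static unsigned int gfp_allowed_mask = GFP_ALL;
static unsigned int saved_gfp_mask;

static void pm_restrict_gfp_mask(void)
{
	saved_gfp_mask = gfp_allowed_mask;	/* saved inside the PM core */
	gfp_allowed_mask &= ~GFP_IOFS;		/* no I/O while suspended */
}

static void pm_restore_gfp_mask(void)
{
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;		/* restore is idempotent */
	}
}

int main(void)
{
	pm_restrict_gfp_mask();
	printf("suspended: mask=%#x\n", gfp_allowed_mask);
	pm_restore_gfp_mask();
	pm_restore_gfp_mask();			/* safe on a second path too */
	printf("resumed:   mask=%#x\n", gfp_allowed_mask);
	return 0;
}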