Diffstat (limited to 'kernel')
-rw-r--r--  kernel/dma/debug.c                1
-rw-r--r--  kernel/liveupdate/luo_session.c   9
-rw-r--r--  kernel/sched/deadline.c           2
-rw-r--r--  kernel/sys.c                     30
-rw-r--r--  kernel/time/clockevents.c        27
-rw-r--r--  kernel/time/hrtimer.c             1
-rw-r--r--  kernel/time/tick-broadcast.c      8
-rw-r--r--  kernel/time/tick-common.c         1
-rw-r--r--  kernel/time/tick-sched.c          3
-rw-r--r--  kernel/trace/trace_probe.c        2
-rw-r--r--  kernel/workqueue.c               14
11 files changed, 70 insertions, 28 deletions
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 0677918f06a8..1a725edbbbf6 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -615,6 +615,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
         } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
                    !(entry->is_cache_clean && overlap_cache_clean) &&
+                   dma_get_cache_alignment() >= L1_CACHE_BYTES &&
                    !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
                      is_swiotlb_active(entry->dev))) {
                 err_printk(entry->dev, entry,
diff --git a/kernel/liveupdate/luo_session.c b/kernel/liveupdate/luo_session.c
index 783677295640..25ae704d7787 100644
--- a/kernel/liveupdate/luo_session.c
+++ b/kernel/liveupdate/luo_session.c
@@ -558,8 +558,13 @@ int luo_session_deserialize(void)
                 }
 
                 scoped_guard(mutex, &session->mutex) {
-                        luo_file_deserialize(&session->file_set,
-                                             &sh->ser[i].file_set_ser);
+                        err = luo_file_deserialize(&session->file_set,
+                                                   &sh->ser[i].file_set_ser);
+                }
+                if (err) {
+                        pr_warn("Failed to deserialize files for session [%s] %pe\n",
+                                session->name, ERR_PTR(err));
+                        return err;
+                }
         }
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d08b00429323..674de6a48551 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1027,7 +1027,7 @@ static void update_dl_entity(struct sched_dl_entity *dl_se)
         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
             dl_entity_overflow(dl_se, rq_clock(rq))) {
 
-                if (unlikely(!dl_is_implicit(dl_se) &&
+                if (unlikely((!dl_is_implicit(dl_se) || dl_se->dl_defer) &&
                              !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
                              !is_dl_boosted(dl_se))) {
                         update_dl_revised_wakeup(dl_se, rq);
diff --git a/kernel/sys.c b/kernel/sys.c
index c86eba9aa7e9..62e842055cc9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2388,17 +2388,18 @@ int __weak arch_lock_shadow_stack_status(struct task_struct *t, unsigned long st
         return -EINVAL;
 }
 
-int __weak arch_get_indir_br_lp_status(struct task_struct *t, unsigned long __user *status)
+int __weak arch_prctl_get_branch_landing_pad_state(struct task_struct *t,
+                                                   unsigned long __user *state)
 {
         return -EINVAL;
 }
 
-int __weak arch_set_indir_br_lp_status(struct task_struct *t, unsigned long status)
+int __weak arch_prctl_set_branch_landing_pad_state(struct task_struct *t, unsigned long state)
 {
         return -EINVAL;
 }
 
-int __weak arch_lock_indir_br_lp_status(struct task_struct *t, unsigned long status)
+int __weak arch_prctl_lock_branch_landing_pad_state(struct task_struct *t)
 {
         return -EINVAL;
 }
@@ -2888,20 +2889,23 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                         return -EINVAL;
                 error = rseq_slice_extension_prctl(arg2, arg3);
                 break;
-        case PR_GET_INDIR_BR_LP_STATUS:
-                if (arg3 || arg4 || arg5)
+        case PR_GET_CFI:
+                if (arg2 != PR_CFI_BRANCH_LANDING_PADS)
                         return -EINVAL;
-                error = arch_get_indir_br_lp_status(me, (unsigned long __user *)arg2);
-                break;
-        case PR_SET_INDIR_BR_LP_STATUS:
-                if (arg3 || arg4 || arg5)
+                if (arg4 || arg5)
                         return -EINVAL;
-                error = arch_set_indir_br_lp_status(me, arg2);
+                error = arch_prctl_get_branch_landing_pad_state(me, (unsigned long __user *)arg3);
                 break;
-        case PR_LOCK_INDIR_BR_LP_STATUS:
-                if (arg3 || arg4 || arg5)
+        case PR_SET_CFI:
+                if (arg2 != PR_CFI_BRANCH_LANDING_PADS)
                         return -EINVAL;
-                error = arch_lock_indir_br_lp_status(me, arg2);
+                if (arg4 || arg5)
+                        return -EINVAL;
+                error = arch_prctl_set_branch_landing_pad_state(me, arg3);
+                if (error)
+                        break;
+                if (arg3 & PR_CFI_LOCK && !(arg3 & PR_CFI_DISABLE))
+                        error = arch_prctl_lock_branch_landing_pad_state(me);
                 break;
         default:
                 trace_task_prctl_unknown(option, arg2, arg3, arg4, arg5);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index eaae1ce9f060..38570998a19b 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -172,6 +172,7 @@ void clockevents_shutdown(struct clock_event_device *dev)
 {
         clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
         dev->next_event = KTIME_MAX;
+        dev->next_event_forced = 0;
 }
 
 /**
@@ -305,7 +306,6 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 {
         unsigned long long clc;
         int64_t delta;
-        int rc;
 
         if (WARN_ON_ONCE(expires < 0))
                 return -ETIME;
@@ -324,16 +324,27 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                 return dev->set_next_ktime(expires, dev);
 
         delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
-        if (delta <= 0)
-                return force ? clockevents_program_min_delta(dev) : -ETIME;
-        delta = min(delta, (int64_t) dev->max_delta_ns);
-        delta = max(delta, (int64_t) dev->min_delta_ns);
+        /* Required for tick_periodic() during early boot */
+        if (delta <= 0 && !force)
+                return -ETIME;
+
+        if (delta > (int64_t)dev->min_delta_ns) {
+                delta = min(delta, (int64_t) dev->max_delta_ns);
+                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
+                if (!dev->set_next_event((unsigned long) clc, dev))
+                        return 0;
+        }
 
-        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
-        rc = dev->set_next_event((unsigned long) clc, dev);
+        if (dev->next_event_forced)
+                return 0;
 
-        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
+        if (dev->set_next_event(dev->min_delta_ticks, dev)) {
+                if (!force || clockevents_program_min_delta(dev))
+                        return -ETIME;
+        }
+        dev->next_event_forced = 1;
+        return 0;
 }
 
 /*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 860af7a58428..1e37142fe52f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1888,6 +1888,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         BUG_ON(!cpu_base->hres_active);
         cpu_base->nr_events++;
         dev->next_event = KTIME_MAX;
+        dev->next_event_forced = 0;
 
         raw_spin_lock_irqsave(&cpu_base->lock, flags);
         entry_time = now = hrtimer_update_base(cpu_base);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f63c65881364..7e57fa31ee26 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -76,8 +76,10 @@ const struct clock_event_device *tick_get_wakeup_device(int cpu)
  */
 static void tick_broadcast_start_periodic(struct clock_event_device *bc)
 {
-        if (bc)
+        if (bc) {
+                bc->next_event_forced = 0;
                 tick_setup_periodic(bc, 1);
+        }
 }
 
 /*
@@ -403,6 +405,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
         bool bc_local;
 
         raw_spin_lock(&tick_broadcast_lock);
+        tick_broadcast_device.evtdev->next_event_forced = 0;
 
         /* Handle spurious interrupts gracefully */
         if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
@@ -696,6 +699,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 
         raw_spin_lock(&tick_broadcast_lock);
         dev->next_event = KTIME_MAX;
+        tick_broadcast_device.evtdev->next_event_forced = 0;
         next_event = KTIME_MAX;
         cpumask_clear(tmpmask);
         now = ktime_get();
@@ -1063,6 +1067,7 @@ static void tick_broadcast_setup_oneshot(struct clock_event_device *bc,
 
                 bc->event_handler = tick_handle_oneshot_broadcast;
+                bc->next_event_forced = 0;
                 bc->next_event = KTIME_MAX;
 
                 /*
@@ -1175,6 +1180,7 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
                 }
 
                 /* This moves the broadcast assignment to this CPU: */
+                bc->next_event_forced = 0;
                 clockevents_program_event(bc, bc->next_event, 1);
         }
         raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d305d8521896..6a9198a4279b 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -110,6 +110,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
         int cpu = smp_processor_id();
         ktime_t next = dev->next_event;
 
+        dev->next_event_forced = 0;
         tick_periodic(cpu);
 
         /*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f7907fadd63f..d1f27df1e60e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -345,7 +345,7 @@ static bool check_tick_dependency(atomic_t *dep)
         int val = atomic_read(dep);
 
         if (likely(!tracepoint_enabled(tick_stop)))
-                return !val;
+                return !!val;
 
         if (val & TICK_DEP_MASK_POSIX_TIMER) {
                 trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
@@ -1513,6 +1513,7 @@ static void tick_nohz_lowres_handler(struct clock_event_device *dev)
         struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
         dev->next_event = KTIME_MAX;
+        dev->next_event_forced = 0;
 
         if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART))
                 tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index e0a5dc86c07e..e1c73065dae5 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1068,7 +1068,7 @@ static int __parse_imm_string(char *str, char **pbuf, int offs)
 {
         size_t len = strlen(str);
 
-        if (str[len - 1] != '"') {
+        if (!len || str[len - 1] != '"') {
                 trace_probe_log_err(offs + len, IMMSTR_NO_CLOSE);
                 return -EINVAL;
         }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eda756556341..c6ea96d5b716 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1849,8 +1849,20 @@ static void unplug_oldest_pwq(struct workqueue_struct *wq)
         raw_spin_lock_irq(&pwq->pool->lock);
         if (pwq->plugged) {
                 pwq->plugged = false;
-                if (pwq_activate_first_inactive(pwq, true))
+                if (pwq_activate_first_inactive(pwq, true)) {
+                        /*
+                         * While plugged, queueing skips activation which
+                         * includes bumping the nr_active count and adding the
+                         * pwq to nna->pending_pwqs if the count can't be
+                         * obtained. We need to restore both for the pwq being
+                         * unplugged. The first call activates the first
+                         * inactive work item and the second, if there are more
+                         * inactive, puts the pwq on pending_pwqs.
+                         */
+                        pwq_activate_first_inactive(pwq, false);
+                        kick_pool(pwq->pool);
+                }
         }
         raw_spin_unlock_irq(&pwq->pool->lock);
 }
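
The kernel/sys.c hunks above fold the old PR_{GET,SET,LOCK}_INDIR_BR_LP_STATUS prctls into a PR_GET_CFI/PR_SET_CFI pair keyed by PR_CFI_BRANCH_LANDING_PADS: arg2 selects the CFI feature, arg3 carries the state (or a user pointer for the get case), arg4/arg5 must be zero, and PR_CFI_LOCK without PR_CFI_DISABLE also locks the setting. As a rough, non-authoritative sketch of how userspace might exercise that interface (not part of this commit): the PR_GET_CFI, PR_SET_CFI, PR_CFI_BRANCH_LANDING_PADS, PR_CFI_LOCK and PR_CFI_DISABLE constants are assumed to come from the matching uapi <linux/prctl.h> change, which is outside the kernel/ diffstat shown here.

/* Hypothetical userspace sketch; constants assumed from the patched uapi <linux/prctl.h>. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        unsigned long state = 0;

        /* arg2 selects the CFI feature, arg3 receives the state, arg4/arg5 must be 0 */
        if (prctl(PR_GET_CFI, PR_CFI_BRANCH_LANDING_PADS, &state, 0, 0) != 0)
                perror("PR_GET_CFI");
        else
                printf("branch landing pad state: %#lx\n", state);

        /*
         * arg3 carries the requested state; per the sys.c hunk, PR_CFI_LOCK
         * without PR_CFI_DISABLE additionally locks the setting.
         */
        if (prctl(PR_SET_CFI, PR_CFI_BRANCH_LANDING_PADS, state | PR_CFI_LOCK, 0, 0) != 0)
                perror("PR_SET_CFI");

        return 0;
}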
