Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup/cgroup.c                |  5
-rw-r--r--  kernel/cgroup/cpuset.c                |  5
-rw-r--r--  kernel/cgroup/legacy_freezer.c        |  9
-rw-r--r--  kernel/dma/pool.c                     | 27
-rw-r--r--  kernel/events/core.c                  |  9
-rw-r--r--  kernel/panic.c                        |  4
-rw-r--r--  kernel/sched/core.c                   | 18
-rw-r--r--  kernel/sched/deadline.c               | 36
-rw-r--r--  kernel/sched/ext.c                    |  1
-rw-r--r--  kernel/sched/fair.c                   | 16
-rw-r--r--  kernel/sched/features.h               |  2
-rw-r--r--  kernel/sched/idle.c                   |  6
-rw-r--r--  kernel/sched/sched.h                  | 27
-rw-r--r--  kernel/sched/syscalls.c               | 32
-rw-r--r--  kernel/time/clocksource.c             |  2
-rw-r--r--  kernel/time/hrtimer.c                 |  2
-rw-r--r--  kernel/time/timekeeping.c             |  2
-rw-r--r--  kernel/trace/trace.c                  |  8
-rw-r--r--  kernel/trace/trace_events_hist.c      |  9
-rw-r--r--  kernel/trace/trace_events_synth.c     |  8
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  2
21 files changed, 122 insertions, 108 deletions
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 554a02ee298b..5f0d33b04910 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Generic process-grouping system.
  *
@@ -20,10 +21,6 @@
  * 2003-10-22 Updates by Stephen Hemminger.
  * 2004 May-July Rework by Paul Jackson.
  * ---------------------------------------------------
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of the Linux
- * distribution for more details.
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 3e8cc34d8d50..c06e2e96f79d 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  kernel/cpuset.c
  *
@@ -16,10 +17,6 @@
  *  2006 Rework by Paul Menage to use generic cgroups
  *  2008 Rework of the scheduler domains and CPU hotplug handling
  *       by Max Krasnyansky
- *
- *  This file is subject to the terms and conditions of the GNU General Public
- *  License. See the file COPYING in the main directory of the Linux
- *  distribution for more details.
  */

 #include "cpuset-internal.h"
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index 915b02f65980..817c33450fee 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * cgroup_freezer.c - control group freezer subsystem
  *
  * Copyright IBM Corporation, 2007
  *
  * Author : Cedric Le Goater <clg@fr.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  */

 #include <linux/export.h>
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 26392badc36b..c5da29ad010c 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -184,6 +184,12 @@ static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
 	return pool;
 }

+#ifdef CONFIG_ZONE_DMA32
+#define has_managed_dma32	has_managed_zone(ZONE_DMA32)
+#else
+#define has_managed_dma32	false
+#endif
+
 static int __init dma_atomic_pool_init(void)
 {
 	int ret = 0;
@@ -199,17 +205,20 @@ static int __init dma_atomic_pool_init(void)
 	}
 	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

-	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+	/* All memory might be in the DMA zone(s) to begin with */
+	if (has_managed_zone(ZONE_NORMAL)) {
+		atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
-	if (!atomic_pool_kernel)
-		ret = -ENOMEM;
+		if (!atomic_pool_kernel)
+			ret = -ENOMEM;
+	}
 	if (has_managed_dma()) {
 		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
 		if (!atomic_pool_dma)
 			ret = -ENOMEM;
 	}
-	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+	if (has_managed_dma32) {
 		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
 		if (!atomic_pool_dma32)
@@ -224,11 +233,11 @@ postcore_initcall(dma_atomic_pool_init);
 static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
 	if (prev == NULL) {
-		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
-			return atomic_pool_dma32;
-		if (atomic_pool_dma && (gfp & GFP_DMA))
-			return atomic_pool_dma;
-		return atomic_pool_kernel;
+		if (gfp & GFP_DMA)
+			return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
+		if (gfp & GFP_DMA32)
+			return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
+		return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
 	}
 	if (prev == atomic_pool_kernel)
 		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5b5cb620499e..a0fa488bce84 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6997,6 +6997,15 @@ static int perf_mmap_rb(struct vm_area_struct *vma, struct perf_event *event,
 		if (data_page_nr(event->rb) != nr_pages)
 			return -EINVAL;

+		/*
+		 * If this event doesn't have mmap_count, we're attempting to
+		 * create an alias of another event's mmap(); this would mean
+		 * both events will end up scribbling the same user_page;
+		 * which makes no sense.
+		 */
+		if (!refcount_read(&event->mmap_count))
+			return -EBUSY;
+
 		if (refcount_inc_not_zero(&event->rb->mmap_count)) {
 			/*
 			 * Success -- managed to mmap() the same buffer
diff --git a/kernel/panic.c b/kernel/panic.c
index 0d52210a9e2b..0c20fcaae98a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -131,7 +131,8 @@ static int proc_taint(const struct ctl_table *table, int write,
 static int sysctl_panic_print_handler(const struct ctl_table *table, int write,
 				      void *buffer, size_t *lenp, loff_t *ppos)
 {
-	panic_print_deprecated();
+	if (write)
+		panic_print_deprecated();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }

@@ -1014,7 +1015,6 @@ static int panic_print_set(const char *val, const struct kernel_param *kp)

 static int panic_print_get(char *val, const struct kernel_param *kp)
 {
-	panic_print_deprecated();
 	return param_get_ulong(val, kp);
 }

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 60afadb6eede..045f83ad261e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4950,9 +4950,13 @@ struct balance_callback *splice_balance_callbacks(struct rq *rq)
 	return __splice_balance_callbacks(rq, true);
 }

-static void __balance_callbacks(struct rq *rq)
+void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
 {
+	if (rf)
+		rq_unpin_lock(rq, rf);
 	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
+	if (rf)
+		rq_repin_lock(rq, rf);
 }

 void balance_callbacks(struct rq *rq, struct balance_callback *head)
@@ -4991,7 +4995,7 @@ static inline void finish_lock_switch(struct rq *rq)
 	 * prev into current:
 	 */
 	spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
-	__balance_callbacks(rq);
+	__balance_callbacks(rq, NULL);
 	raw_spin_rq_unlock_irq(rq);
 }

@@ -6867,7 +6871,7 @@ keep_resched:
 			proxy_tag_curr(rq, next);

 		rq_unpin_lock(rq, &rf);
-		__balance_callbacks(rq);
+		__balance_callbacks(rq, NULL);
 		raw_spin_rq_unlock_irq(rq);
 	}
 	trace_sched_exit_tp(is_switch);
@@ -7316,7 +7320,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 	trace_sched_pi_setprio(p, pi_task);
 	oldprio = p->prio;

-	if (oldprio == prio)
+	if (oldprio == prio && !dl_prio(prio))
 		queue_flag &= ~DEQUEUE_MOVE;

 	prev_class = p->sched_class;
@@ -7362,9 +7366,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)

 out_unlock:
 	/* Caller holds task_struct::pi_lock, IRQs are still disabled */
-	rq_unpin_lock(rq, &rf);
-	__balance_callbacks(rq);
-	rq_repin_lock(rq, &rf);
+	__balance_callbacks(rq, &rf);
 	__task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_RT_MUTEXES */
@@ -9124,6 +9126,8 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)

 	if (resched)
 		resched_curr(rq);
+
+	__balance_callbacks(rq, &rq_guard.rf);
 }

 static struct cgroup_subsys_state *
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 319439fe1870..c509f2e7d69d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -752,8 +752,6 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 	struct rq *rq = rq_of_dl_rq(dl_rq);

-	update_rq_clock(rq);
-
 	WARN_ON(is_dl_boosted(dl_se));
 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

@@ -1420,7 +1418,7 @@ update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, int
 static
 void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
 {
-	bool idle = rq->curr == rq->idle;
+	bool idle = idle_rq(rq);
 	s64 scaled_delta_exec;

 	if (unlikely(delta_exec <= 0)) {
@@ -1603,8 +1601,8 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
  * | 8 |     B:zero_laxity-wait     |   |  |
  * |   |                            | <---+  |
  * |   +--------------------------------+    |
- * |     |                  ^   ^ 2          |
- * |     | 7                | 2 +--------------------+
+ * |     |                  ^   ^ 2          |
+ * |     | 7                | 2, 1 +----------------+
  * |     v                      |
  * |   +-------------+          |
  * +-- | C:idle-wait | -+
@@ -1649,8 +1647,11 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
  *       dl_defer_idle = 0
  *
  *
- * [1] A->B, A->D
+ * [1] A->B, A->D, C->B
  *     dl_server_start()
+ *       dl_defer_idle = 0;
+ *       if (dl_server_active)
+ *         return; // [B]
  *       dl_server_active = 1;
  *       enqueue_dl_entity()
  *         update_dl_entity(WAKEUP)
@@ -1759,6 +1760,7 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
  *   "B:zero_laxity-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
  *   "B:zero_laxity-wait" -> "D:running" [label="3:dl_server_timer"]
  *   "C:idle-wait" -> "A:init" [label="8:dl_server_timer"]
+ *   "C:idle-wait" -> "B:zero_laxity-wait" [label="1:dl_server_start"]
  *   "C:idle-wait" -> "B:zero_laxity-wait" [label="2:dl_server_update"]
  *   "C:idle-wait" -> "C:idle-wait" [label="7:dl_server_update_idle"]
  *   "D:running" -> "A:init" [label="4:pick_task_dl"]
@@ -1784,6 +1786,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
 {
 	struct rq *rq = dl_se->rq;

+	dl_se->dl_defer_idle = 0;
 	if (!dl_server(dl_se) || dl_se->dl_server_active)
 		return;

@@ -1834,6 +1837,7 @@ void sched_init_dl_servers(void)
 		rq = cpu_rq(cpu);

 		guard(rq_lock_irq)(rq);
+		update_rq_clock(rq);

 		dl_se = &rq->fair_server;

@@ -2210,7 +2214,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 		update_dl_entity(dl_se);
 	} else if (flags & ENQUEUE_REPLENISH) {
 		replenish_dl_entity(dl_se);
-	} else if ((flags & ENQUEUE_RESTORE) &&
+	} else if ((flags & ENQUEUE_MOVE) &&
 		   !is_dl_boosted(dl_se) &&
 		   dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
 		setup_new_dl_entity(dl_se);
@@ -3154,7 +3158,7 @@ void dl_add_task_root_domain(struct task_struct *p)
 	struct rq *rq;
 	struct dl_bw *dl_b;
 	unsigned int cpu;
-	struct cpumask *msk = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
+	struct cpumask *msk;

 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
 	if (!dl_task(p) || dl_entity_is_special(&p->dl)) {
@@ -3162,20 +3166,12 @@ void dl_add_task_root_domain(struct task_struct *p)
 		return;
 	}

-	/*
-	 * Get an active rq, whose rq->rd traces the correct root
-	 * domain.
-	 * Ideally this would be under cpuset reader lock until rq->rd is
-	 * fetched. However, sleepable locks cannot nest inside pi_lock, so we
-	 * rely on the caller of dl_add_task_root_domain() holds 'cpuset_mutex'
-	 * to guarantee the CPU stays in the cpuset.
-	 */
+	msk = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 	dl_get_task_effective_cpus(p, msk);
 	cpu = cpumask_first_and(cpu_active_mask, msk);
 	BUG_ON(cpu >= nr_cpu_ids);
 	rq = cpu_rq(cpu);
 	dl_b = &rq->rd->dl_bw;
-	/* End of fetching rd */

 	raw_spin_lock(&dl_b->lock);
 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
@@ -3299,6 +3295,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)

 static u64 get_prio_dl(struct rq *rq, struct task_struct *p)
 {
+	/*
+	 * Make sure to update current so we don't return a stale value.
+	 */
+	if (task_current_donor(rq, p))
+		update_curr_dl(rq);
+
 	return p->dl.deadline;
 }

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 8f6d8d7f895c..afe28c04d5aa 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -545,6 +545,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
 static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
 {
 	if (iter->locked_task) {
+		__balance_callbacks(iter->rq, &iter->rf);
 		task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
 		iter->locked_task = NULL;
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e71302282671..3eaeceda71b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8828,16 +8828,6 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	if ((wake_flags & WF_FORK) || pse->sched_delayed)
 		return;

-	/*
-	 * If @p potentially is completing work required by current then
-	 * consider preemption.
-	 *
-	 * Reschedule if waker is no longer eligible. */
-	if (in_task() && !entity_eligible(cfs_rq, se)) {
-		preempt_action = PREEMPT_WAKEUP_RESCHED;
-		goto preempt;
-	}
-
 	/* Prefer picking wakee soon if appropriate. */
 	if (sched_feat(NEXT_BUDDY) &&
 	    set_preempt_buddy(cfs_rq, wake_flags, pse, se)) {
@@ -8995,12 +8985,6 @@ idle:
 		goto again;
 	}

-	/*
-	 * rq is about to be idle, check if we need to update the
-	 * lost_idle_time of clock_pelt
-	 */
-	update_idle_rq_clock_pelt(rq);
-
 	return NULL;
 }

diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 980d92bab8ab..136a6584be79 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -29,7 +29,7 @@ SCHED_FEAT(PREEMPT_SHORT, true)
 * wakeup-preemption), since its likely going to consume data we
 * touched, increases cache locality.
 */
-SCHED_FEAT(NEXT_BUDDY, true)
+SCHED_FEAT(NEXT_BUDDY, false)

 /*
 * Allow completely ignoring cfs_rq->next; which can be set from various
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c174afe1dd17..abf8f15d60c9 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -468,6 +468,12 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 	scx_update_idle(rq, true, true);
 	schedstat_inc(rq->sched_goidle);
 	next->se.exec_start = rq_clock_task(rq);
+
+	/*
+	 * rq is about to be idle, check if we need to update the
+	 * lost_idle_time of clock_pelt
+	 */
+	update_idle_rq_clock_pelt(rq);
 }

 struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d30cca6870f5..93fce4bbff5e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1364,6 +1364,28 @@ static inline u32 sched_rng(void)
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)

+static inline bool idle_rq(struct rq *rq)
+{
+	return rq->curr == rq->idle && !rq->nr_running && !rq->ttwu_pending;
+}
+
+/**
+ * available_idle_cpu - is a given CPU idle for enqueuing work.
+ * @cpu: the CPU in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+static inline bool available_idle_cpu(int cpu)
+{
+	if (!idle_rq(cpu_rq(cpu)))
+		return 0;
+
+	if (vcpu_is_preempted(cpu))
+		return 0;
+
+	return 1;
+}
+
 #ifdef CONFIG_SCHED_PROXY_EXEC
 static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
 {
@@ -2366,7 +2388,8 @@ extern const u32 sched_prio_to_wmult[40];
 * should preserve as much state as possible.
 *
 * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location
- *        in the runqueue.
+ *        in the runqueue. IOW the priority is allowed to change. Callers
+ *        must expect to deal with balance callbacks.
 *
 * NOCLOCK - skip the update_rq_clock() (avoids double updates)
 *
@@ -3947,6 +3970,8 @@ extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
 extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);

 extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
+
+extern void __balance_callbacks(struct rq *rq, struct rq_flags *rf);
 extern void balance_callbacks(struct rq *rq, struct balance_callback *head);

 /*
Callers + * must expect to deal with balance callbacks. * * NOCLOCK - skip the update_rq_clock() (avoids double updates) * @@ -3947,6 +3970,8 @@ extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags); extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags); extern struct balance_callback *splice_balance_callbacks(struct rq *rq); + +extern void __balance_callbacks(struct rq *rq, struct rq_flags *rf); extern void balance_callbacks(struct rq *rq, struct balance_callback *head); /* diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index 0496dc29ed0f..6f10db3646e7 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -180,35 +180,7 @@ int task_prio(const struct task_struct *p) */ int idle_cpu(int cpu) { - struct rq *rq = cpu_rq(cpu); - - if (rq->curr != rq->idle) - return 0; - - if (rq->nr_running) - return 0; - - if (rq->ttwu_pending) - return 0; - - return 1; -} - -/** - * available_idle_cpu - is a given CPU idle for enqueuing work. - * @cpu: the CPU in question. - * - * Return: 1 if the CPU is currently idle. 0 otherwise. - */ -int available_idle_cpu(int cpu) -{ - if (!idle_cpu(cpu)) - return 0; - - if (vcpu_is_preempted(cpu)) - return 0; - - return 1; + return idle_rq(cpu_rq(cpu)); } /** @@ -667,7 +639,7 @@ change: * itself. */ newprio = rt_effective_prio(p, newprio); - if (newprio == oldprio) + if (newprio == oldprio && !dl_prio(newprio)) queue_flags &= ~DEQUEUE_MOVE; } diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index a1890a073196..df7194961658 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -252,7 +252,7 @@ enum wd_read_status { static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow) { - int64_t md = 2 * watchdog->uncertainty_margin; + int64_t md = watchdog->uncertainty_margin; unsigned int nretries, max_retries; int64_t wd_delay, wd_seq_delay; u64 wd_end, wd_end2; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index bdb30cc5e873..0e4bc1ca15ff 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -913,7 +913,7 @@ static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base, return true; /* Extra check for softirq clock bases */ - if (base->clockid < HRTIMER_BASE_MONOTONIC_SOFT) + if (base->index < HRTIMER_BASE_MONOTONIC_SOFT) continue; if (cpu_base->softirq_activated) continue; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 3ec3daa4acab..91fa2003351c 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2735,7 +2735,7 @@ static int __do_adjtimex(struct tk_data *tkd, struct __kernel_timex *txc, timekeeping_update_from_shadow(tkd, TK_CLOCK_WAS_SET); result->clock_set = true; } else { - tk_update_leap_state_all(&tk_core); + tk_update_leap_state_all(tkd); } /* Update the multiplier immediately if frequency was set directly */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index baec63134ab6..8bd4ec08fb36 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6115,10 +6115,10 @@ static int cmp_mod_entry(const void *key, const void *pivot) unsigned long addr = (unsigned long)key; const struct trace_mod_entry *ent = pivot; - if (addr >= ent[0].mod_addr && addr < ent[1].mod_addr) - return 0; - else - return addr - ent->mod_addr; + if (addr < ent[0].mod_addr) + return -1; + + return addr >= ent[1].mod_addr; } /** diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 5e6e70540eef..c97bb2fda5c0 100644 --- 
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 5e6e70540eef..c97bb2fda5c0 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2057,6 +2057,15 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
 			hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING;
 		else
 			hist_field->fn_num = HIST_FIELD_FN_PSTRING;
+	} else if (field->filter_type == FILTER_STACKTRACE) {
+		flags |= HIST_FIELD_FL_STACKTRACE;
+
+		hist_field->size = MAX_FILTER_STR_VAL;
+		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
+		if (!hist_field->type)
+			goto free;
+
+		hist_field->fn_num = HIST_FIELD_FN_STACK;
 	} else {
 		hist_field->size = field->size;
 		hist_field->is_signed = field->is_signed;
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 4554c458b78c..45c187e77e21 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -130,7 +130,9 @@ static int synth_event_define_fields(struct trace_event_call *call)
 	struct synth_event *event = call->data;
 	unsigned int i, size, n_u64;
 	char *name, *type;
+	int filter_type;
 	bool is_signed;
+	bool is_stack;
 	int ret = 0;

 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
@@ -138,8 +140,12 @@ static int synth_event_define_fields(struct trace_event_call *call)
 		is_signed = event->fields[i]->is_signed;
 		type = event->fields[i]->type;
 		name = event->fields[i]->name;
+		is_stack = event->fields[i]->is_stack;
+
+		filter_type = is_stack ? FILTER_STACKTRACE : FILTER_OTHER;
+
 		ret = trace_define_field(call, type, name, offset, size,
-					 is_signed, FILTER_OTHER);
+					 is_signed, filter_type);
 		if (ret)
 			break;

diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1e9c9913309..1de6f1573621 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -901,7 +901,7 @@ static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entr
 	trace_seq_printf(s, "%ps", func);

 	if (args_size >= FTRACE_REGS_MAX_ARGS * sizeof(long)) {
-		print_function_args(s, entry->args, (unsigned long)func);
+		print_function_args(s, FGRAPH_ENTRY_ARGS(entry), (unsigned long)func);
 		trace_seq_putc(s, ';');
 	} else
 		trace_seq_puts(s, "();");
