path: root/kernel
author	Mark Brown <broonie@kernel.org>	2026-01-12 12:15:35 +0000
committer	Mark Brown <broonie@kernel.org>	2026-01-12 12:15:35 +0000
commit	6eb6b62f002f1cbc538c2e773539d1d4c37749cb (patch)
tree	e5170ea004e5cce5ef2e5d2a32c3aa41c4f18fa6 /kernel
parent	b0655377aa5a410df02d89170c20141a1a5bbc28 (diff)
parent	8d38423d9dea7353a8a54a3ab2e0d0aa04ed34d0 (diff)
regulator: core: allow regulator_register() with
Merge series from André Draszik <andre.draszik@linaro.org>:

With these patches it becomes possible again to support hardware designs with multiple PMICs where individual rails of each act as required supplies for rails of the other (due to the latter being e.g. always-on), and vice versa. Google Pixel 6 and 6 Pro (oriole and raven) are examples of such designs.

Rather than returning -EPROBE_DEFER from regulator_register() when set_machine_constraints() fails with -EPROBE_DEFER (due to missing required supplies), we still allow rail registration and try to re-resolve supplies each time a new rail gets registered. This is implemented using a bus (the regulator bus), which allows the core to re-resolve supplies for regulators that still need them whenever new regulators (i.e. devices) are added. Using a bus also solves existing problems around late resolution of supplies, as mentioned in the commit message introducing that bus.

The series starts with a few bug fixes; the last two commits implement the changes described above and depend on those bug fixes.
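To illustrate the registration/re-resolution pattern described above, here is a small, self-contained userspace model. It is only a sketch of the idea, not kernel code: a rail registers successfully even when its required supply is not available yet, and every new registration triggers another resolution pass over rails whose supplies are still pending. All names used here (struct rail, register_rail(), resolve_pending()) are illustrative assumptions and are not the regulator core API.

/*
 * Minimal model: registration never fails just because a supply is not
 * known yet; each new registration retries resolution for pending rails.
 */
#include <stdio.h>
#include <string.h>

struct rail {
	const char *name;
	const char *supply_name;	/* required supply, NULL if none */
	struct rail *supply;		/* resolved later, may stay NULL for a while */
};

static struct rail *rails[16];
static int nr_rails;

static struct rail *find_rail(const char *name)
{
	for (int i = 0; i < nr_rails; i++)
		if (!strcmp(rails[i]->name, name))
			return rails[i];
	return NULL;
}

/* Re-resolve supplies for every rail that still needs one. */
static void resolve_pending(void)
{
	for (int i = 0; i < nr_rails; i++) {
		struct rail *r = rails[i];

		if (r->supply_name && !r->supply) {
			r->supply = find_rail(r->supply_name);
			if (r->supply)
				printf("%s: supply %s resolved\n",
				       r->name, r->supply_name);
		}
	}
}

/* Registration succeeds even if the required supply is not available yet. */
static void register_rail(struct rail *r)
{
	rails[nr_rails++] = r;
	printf("%s: registered\n", r->name);
	resolve_pending();
}

int main(void)
{
	/* Two PMICs whose rails supply each other, as on oriole/raven. */
	struct rail a = { .name = "pmic0-ldo1", .supply_name = "pmic1-buck2" };
	struct rail b = { .name = "pmic1-buck2", .supply_name = "pmic0-ldo1" };

	register_rail(&a);	/* supply still missing: stays pending, no failure */
	register_rail(&b);	/* both supplies resolve on this registration */
	return 0;
}

With two mutually supplying rails, neither registration fails; the second registration resolves both supplies, mirroring the cross-PMIC case described above.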
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/core.c	16
-rw-r--r--	kernel/bpf/dmabuf_iter.c	56
-rw-r--r--	kernel/cgroup/cpuset.c	21
-rw-r--r--	kernel/cgroup/rstat.c	13
-rw-r--r--	kernel/irq/manage.c	2
-rw-r--r--	kernel/kexec_core.c	16
-rw-r--r--	kernel/kthread.c	1
-rw-r--r--	kernel/power/em_netlink_autogen.c	1
-rw-r--r--	kernel/power/em_netlink_autogen.h	1
-rw-r--r--	kernel/power/suspend.c	9
-rw-r--r--	kernel/sched/ext.c	95
-rw-r--r--	kernel/trace/bpf_trace.c	2
-rw-r--r--	kernel/trace/ftrace.c	7
-rw-r--r--	kernel/trace/trace.c	2
-rw-r--r--	kernel/trace/trace_events.c	2
15 files changed, 178 insertions, 66 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c8ae6ab31651..1b9b18e5b03c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -760,6 +760,22 @@ struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
NULL;
}
+bool bpf_has_frame_pointer(unsigned long ip)
+{
+ struct bpf_ksym *ksym;
+ unsigned long offset;
+
+ guard(rcu)();
+
+ ksym = bpf_ksym_find(ip);
+ if (!ksym || !ksym->fp_start || !ksym->fp_end)
+ return false;
+
+ offset = ip - ksym->start;
+
+ return offset >= ksym->fp_start && offset < ksym->fp_end;
+}
+
const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
const struct exception_table_entry *e = NULL;
diff --git a/kernel/bpf/dmabuf_iter.c b/kernel/bpf/dmabuf_iter.c
index 4dd7ef7c145c..cd500248abd9 100644
--- a/kernel/bpf/dmabuf_iter.c
+++ b/kernel/bpf/dmabuf_iter.c
@@ -6,10 +6,33 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
+struct dmabuf_iter_priv {
+ /*
+ * If this pointer is non-NULL, the buffer's refcount is elevated to
+ * prevent destruction between stop/start. If reading is not resumed and
+ * start is never called again, then dmabuf_iter_seq_fini drops the
+ * reference when the iterator is released.
+ */
+ struct dma_buf *dmabuf;
+};
+
static void *dmabuf_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
- if (*pos)
- return NULL;
+ struct dmabuf_iter_priv *p = seq->private;
+
+ if (*pos) {
+ struct dma_buf *dmabuf = p->dmabuf;
+
+ if (!dmabuf)
+ return NULL;
+
+ /*
+ * Always resume from where we stopped, regardless of the value
+ * of pos.
+ */
+ p->dmabuf = NULL;
+ return dmabuf;
+ }
return dma_buf_iter_begin();
}
@@ -54,8 +77,11 @@ static void dmabuf_iter_seq_stop(struct seq_file *seq, void *v)
{
struct dma_buf *dmabuf = v;
- if (dmabuf)
- dma_buf_put(dmabuf);
+ if (dmabuf) {
+ struct dmabuf_iter_priv *p = seq->private;
+
+ p->dmabuf = dmabuf;
+ }
}
static const struct seq_operations dmabuf_iter_seq_ops = {
@@ -71,11 +97,27 @@ static void bpf_iter_dmabuf_show_fdinfo(const struct bpf_iter_aux_info *aux,
seq_puts(seq, "dmabuf iter\n");
}
+static int dmabuf_iter_seq_init(void *priv, struct bpf_iter_aux_info *aux)
+{
+ struct dmabuf_iter_priv *p = (struct dmabuf_iter_priv *)priv;
+
+ p->dmabuf = NULL;
+ return 0;
+}
+
+static void dmabuf_iter_seq_fini(void *priv)
+{
+ struct dmabuf_iter_priv *p = (struct dmabuf_iter_priv *)priv;
+
+ if (p->dmabuf)
+ dma_buf_put(p->dmabuf);
+}
+
static const struct bpf_iter_seq_info dmabuf_iter_seq_info = {
.seq_ops = &dmabuf_iter_seq_ops,
- .init_seq_private = NULL,
- .fini_seq_private = NULL,
- .seq_priv_size = 0,
+ .init_seq_private = dmabuf_iter_seq_init,
+ .fini_seq_private = dmabuf_iter_seq_fini,
+ .seq_priv_size = sizeof(struct dmabuf_iter_priv),
};
static struct bpf_iter_reg bpf_dmabuf_reg_info = {
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6e6eb09b8db6..3e8cc34d8d50 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1668,7 +1668,14 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
{
WARN_ON_ONCE(!is_remote_partition(cs));
- WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
+ /*
+ * When a CPU is offlined, top_cpuset may end up with no available CPUs,
+ * which should clear subpartitions_cpus. We should not emit a warning for this
+ * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
+ * may already be cleared when disabling the partition.
+ */
+ WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
+ !cpumask_empty(subpartitions_cpus));
spin_lock_irq(&callback_lock);
cs->remote_partition = false;
@@ -3976,8 +3983,9 @@ retry:
if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
compute_partition_effective_cpumask(cs, &new_cpus);
- if (remote && cpumask_empty(&new_cpus) &&
- partition_is_populated(cs, NULL)) {
+ if (remote && (cpumask_empty(subpartitions_cpus) ||
+ (cpumask_empty(&new_cpus) &&
+ partition_is_populated(cs, NULL)))) {
cs->prs_err = PERR_HOTPLUG;
remote_partition_disable(cs, tmp);
compute_effective_cpumask(&new_cpus, cs, parent);
@@ -3990,9 +3998,12 @@ retry:
* 1) empty effective cpus but not valid empty partition.
* 2) parent is invalid or doesn't grant any cpus to child
* partitions.
+ * 3) subpartitions_cpus is empty.
*/
- if (is_local_partition(cs) && (!is_partition_valid(parent) ||
- tasks_nocpu_error(parent, cs, &new_cpus)))
+ if (is_local_partition(cs) &&
+ (!is_partition_valid(parent) ||
+ tasks_nocpu_error(parent, cs, &new_cpus) ||
+ cpumask_empty(subpartitions_cpus)))
partcmd = partcmd_invalidate;
/*
* On the other hand, an invalid partition root may be transitioned
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index a198e40c799b..150e5871e66f 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -71,7 +71,6 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
{
struct llist_head *lhead;
struct css_rstat_cpu *rstatc;
- struct css_rstat_cpu __percpu *rstatc_pcpu;
struct llist_node *self;
/*
@@ -104,18 +103,22 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
/*
 * This function can be reentered by irqs and nmis for the same cgroup
* and may try to insert the same per-cpu lnode into the llist. Note
- * that llist_add() does not protect against such scenarios.
+ * that llist_add() does not protect against such scenarios. In addition
+ * this same per-cpu lnode can be modified through init_llist_node()
+ * from css_rstat_flush() running on a different CPU.
*
* To protect against such stacked contexts of irqs/nmis, we use the
* fact that lnode points to itself when not on a list and then use
- * this_cpu_cmpxchg() to atomically set to NULL to select the winner
+ * try_cmpxchg() to atomically set to NULL to select the winner
* which will call llist_add(). The losers can assume the insertion is
* successful and the winner will eventually add the per-cpu lnode to
* the llist.
+ *
+ * Please note that we can not use this_cpu_cmpxchg() here as on some
+ * archs it is not safe against modifications from multiple CPUs.
*/
self = &rstatc->lnode;
- rstatc_pcpu = css->rstat_cpu;
- if (this_cpu_cmpxchg(rstatc_pcpu->lnode.next, self, NULL) != self)
+ if (!try_cmpxchg(&rstatc->lnode.next, &self, NULL))
return;
lhead = ss_lhead_cpu(css->ss, cpu);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8b1b4c8a4f54..349ae7979da0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1414,7 +1414,7 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
* Ensure the thread adjusts the affinity once it reaches the
* thread function.
*/
- new->thread_flags = BIT(IRQTF_AFFINITY);
+ set_bit(IRQTF_AFFINITY, &new->thread_flags);
return 0;
}
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 0f92acdd354d..95c585c6ddc3 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -953,17 +953,24 @@ int kimage_load_segment(struct kimage *image, int idx)
return result;
}
-void *kimage_map_segment(struct kimage *image,
- unsigned long addr, unsigned long size)
+void *kimage_map_segment(struct kimage *image, int idx)
{
+ unsigned long addr, size, eaddr;
unsigned long src_page_addr, dest_page_addr = 0;
- unsigned long eaddr = addr + size;
kimage_entry_t *ptr, entry;
struct page **src_pages;
unsigned int npages;
+ struct page *cma;
void *vaddr = NULL;
int i;
+ cma = image->segment_cma[idx];
+ if (cma)
+ return page_address(cma);
+
+ addr = image->segment[idx].mem;
+ size = image->segment[idx].memsz;
+ eaddr = addr + size;
/*
* Collect the source pages and map them in a contiguous VA range.
*/
@@ -1004,7 +1011,8 @@ void *kimage_map_segment(struct kimage *image,
void kimage_unmap_segment(void *segment_buffer)
{
- vunmap(segment_buffer);
+ if (is_vmalloc_addr(segment_buffer))
+ vunmap(segment_buffer);
}
struct kexec_load_limit {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 99a3808d086f..39511dd2abc9 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1599,6 +1599,7 @@ void kthread_use_mm(struct mm_struct *mm)
WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
WARN_ON_ONCE(tsk->mm);
+ WARN_ON_ONCE(!mm->user_ns);
/*
* It is possible for mm to be the same as tsk->active_mm, but
diff --git a/kernel/power/em_netlink_autogen.c b/kernel/power/em_netlink_autogen.c
index a7a09ab1d1c2..ceb3b2bb6ebe 100644
--- a/kernel/power/em_netlink_autogen.c
+++ b/kernel/power/em_netlink_autogen.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/em.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
diff --git a/kernel/power/em_netlink_autogen.h b/kernel/power/em_netlink_autogen.h
index 78ce609641f1..140ab548103c 100644
--- a/kernel/power/em_netlink_autogen.h
+++ b/kernel/power/em_netlink_autogen.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/em.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_EM_GEN_H
#define _LINUX_EM_GEN_H
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 2da4482bb6eb..57c44268698f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -349,9 +349,12 @@ static int suspend_test(int level)
if (pm_test_level == level) {
pr_info("suspend debug: Waiting for %d second(s).\n",
pm_test_delay);
- for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++)
- msleep(1000);
-
+ for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++) {
+ if (level > TEST_CORE)
+ msleep(1000);
+ else
+ mdelay(1000);
+ }
return 1;
}
#endif /* !CONFIG_PM_DEBUG */
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 05f5a49e9649..8f6d8d7f895c 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -41,6 +41,13 @@ static bool scx_init_task_enabled;
static bool scx_switching_all;
DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
+/*
+ * Tracks whether scx_enable() called scx_bypass(true). Used to balance bypass
+ * depth on enable failure. Will be removed when bypass depth is moved into the
+ * sched instance.
+ */
+static bool scx_bypassed_for_enable;
+
static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
@@ -975,6 +982,30 @@ static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
}
+static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p,
+ u64 enq_flags)
+{
+ struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+ bool preempt = false;
+
+ /*
+ * If @rq is in balance, the CPU is already vacant and looking for the
+ * next task to run. No need to preempt or trigger resched after moving
+ * @p into its local DSQ.
+ */
+ if (rq->scx.flags & SCX_RQ_IN_BALANCE)
+ return;
+
+ if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+ rq->curr->sched_class == &ext_sched_class) {
+ rq->curr->scx.slice = 0;
+ preempt = true;
+ }
+
+ if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
+ resched_curr(rq);
+}
+
static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
struct task_struct *p, u64 enq_flags)
{
@@ -1086,22 +1117,10 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
if (enq_flags & SCX_ENQ_CLEAR_OPSS)
atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
- if (is_local) {
- struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
- bool preempt = false;
-
- if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
- rq->curr->sched_class == &ext_sched_class) {
- rq->curr->scx.slice = 0;
- preempt = true;
- }
-
- if (preempt || sched_class_above(&ext_sched_class,
- rq->curr->sched_class))
- resched_curr(rq);
- } else {
+ if (is_local)
+ local_dsq_post_enq(dsq, p, enq_flags);
+ else
raw_spin_unlock(&dsq->lock);
- }
}
static void task_unlink_from_dsq(struct task_struct *p,
@@ -1558,7 +1577,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
*
* @p may go through multiple stopping <-> running transitions between
* here and put_prev_task_scx() if task attribute changes occur while
- * balance_scx() leaves @rq unlocked. However, they don't contain any
+ * balance_one() leaves @rq unlocked. However, they don't contain any
* information meaningful to the BPF scheduler and can be suppressed by
* skipping the callbacks if the task is !QUEUED.
*/
@@ -1625,6 +1644,8 @@ static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
dsq_mod_nr(dst_dsq, 1);
p->scx.dsq = dst_dsq;
+
+ local_dsq_post_enq(dst_dsq, p, enq_flags);
}
/**
@@ -2351,7 +2372,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
* preempted, and it regaining control of the CPU.
*
* ->cpu_release() complements ->cpu_acquire(), which is emitted the
- * next time that balance_scx() is invoked.
+ * next time that balance_one() is invoked.
*/
if (!rq->scx.cpu_released) {
if (SCX_HAS_OP(sch, cpu_release)) {
@@ -2402,7 +2423,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
* ops.enqueue() that @p is the only one available for this cpu,
* which should trigger an explicit follow-up scheduling event.
*/
- if (sched_class_above(&ext_sched_class, next->sched_class)) {
+ if (next && sched_class_above(&ext_sched_class, next->sched_class)) {
WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
} else {
@@ -2425,7 +2446,7 @@ static struct task_struct *
do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
{
struct task_struct *prev = rq->curr;
- bool keep_prev, kick_idle = false;
+ bool keep_prev;
struct task_struct *p;
/* see kick_cpus_irq_workfn() */
@@ -2457,7 +2478,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
}
/*
- * If balance_scx() is telling us to keep running @prev, replenish slice
+ * If balance_one() is telling us to keep running @prev, replenish slice
* if necessary and keep running @prev. Otherwise, pop the first one
* from the local DSQ.
*/
@@ -2467,12 +2488,8 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
refill_task_slice_dfl(rcu_dereference_sched(scx_root), p);
} else {
p = first_local_task(rq);
- if (!p) {
- if (kick_idle)
- scx_kick_cpu(rcu_dereference_sched(scx_root),
- cpu_of(rq), SCX_KICK_IDLE);
+ if (!p)
return NULL;
- }
if (unlikely(!p->scx.slice)) {
struct scx_sched *sch = rcu_dereference_sched(scx_root);
@@ -3575,7 +3592,7 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
int node;
irq_work_sync(&sch->error_irq_work);
- kthread_stop(sch->helper->task);
+ kthread_destroy_worker(sch->helper);
free_percpu(sch->pcpu);
@@ -3939,13 +3956,8 @@ static void bypass_lb_node(struct scx_sched *sch, int node)
nr_donor_target, nr_target);
}
- for_each_cpu(cpu, resched_mask) {
- struct rq *rq = cpu_rq(cpu);
-
- raw_spin_rq_lock_irq(rq);
- resched_curr(rq);
- raw_spin_rq_unlock_irq(rq);
- }
+ for_each_cpu(cpu, resched_mask)
+ resched_cpu(cpu);
for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
@@ -4008,7 +4020,7 @@ static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
*
* - ops.dispatch() is ignored.
*
- * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
* can't be trusted. Whenever a tick triggers, the running task is rotated to
* the tail of the queue with core_sched_at touched.
*
@@ -4318,6 +4330,11 @@ static void scx_disable_workfn(struct kthread_work *work)
scx_dsp_max_batch = 0;
free_kick_syncs();
+ if (scx_bypassed_for_enable) {
+ scx_bypassed_for_enable = false;
+ scx_bypass(false);
+ }
+
mutex_unlock(&scx_enable_mutex);
WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
@@ -4761,8 +4778,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
}
sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
- if (!sch->pcpu)
+ if (!sch->pcpu) {
+ ret = -ENOMEM;
goto err_free_gdsqs;
+ }
sch->helper = kthread_run_worker(0, "sched_ext_helper");
if (IS_ERR(sch->helper)) {
@@ -4786,7 +4805,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
return sch;
err_stop_helper:
- kthread_stop(sch->helper->task);
+ kthread_destroy_worker(sch->helper);
err_free_pcpu:
free_percpu(sch->pcpu);
err_free_gdsqs:
@@ -4970,6 +4989,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
* Init in bypass mode to guarantee forward progress.
*/
scx_bypass(true);
+ scx_bypassed_for_enable = true;
for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
if (((void (**)(void))ops)[i])
@@ -5067,6 +5087,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
scx_task_iter_stop(&sti);
percpu_up_write(&scx_fork_rwsem);
+ scx_bypassed_for_enable = false;
scx_bypass(false);
if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
@@ -6043,7 +6064,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
/*
* A successfully consumed task can be dequeued before it starts
* running while the CPU is trying to migrate other dispatched
- * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+ * tasks. Bump nr_tasks to tell balance_one() to retry on empty
* local DSQ.
*/
dspc->nr_tasks++;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d57727abaade..fe28d86f7c35 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -965,7 +965,7 @@ static const struct bpf_func_proto bpf_d_path_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_d_path_btf_ids[0],
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_WRITE,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.allowed = bpf_d_path_allowed,
};
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3ec2033c0774..ef2d5dca6f70 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4518,8 +4518,11 @@ static int t_show(struct seq_file *m, void *v)
unsigned long direct;
direct = ftrace_find_rec_direct(rec->ip);
- if (direct)
- seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
+ if (direct) {
+ seq_printf(m, "\n\tdirect%s-->%pS",
+ ftrace_is_jmp(direct) ? "(jmp)" : "",
+ (void *)ftrace_jmp_get(direct));
+ }
}
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e575956ef9b5..6f2148df14d9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -10507,7 +10507,7 @@ static int __remove_instance(struct trace_array *tr)
/* Disable all the flags that were enabled coming in */
for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
- if ((1 << i) & ZEROED_TRACE_FLAGS)
+ if ((1ULL << i) & ZEROED_TRACE_FLAGS)
set_tracer_flag(tr, 1ULL << i, 0);
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index b16a5a158040..76067529db61 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -700,6 +700,8 @@ int trace_event_reg(struct trace_event_call *call,
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
+ if (!call->class->perf_probe)
+ return -ENODEV;
return tracepoint_probe_register(call->tp,
call->class->perf_probe,
call);