path: root/kernel/trace/trace.c
author     Tejun Heo <tj@kernel.org>    2013-04-01 17:08:13 -0700
committer  Tejun Heo <tj@kernel.org>    2013-04-01 18:45:36 -0700
commit     229641a6f1f09e27a1f12fba38980f33f4c92975 (patch)
tree       234a6f8aea0910de3242af0bbe6d7494fcf81847 /kernel/trace/trace.c
parent     d55262c4d164759a8debe772da6c9b16059dec47 (diff)
parent     07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into wq/for-3.10
Writeback conversion to workqueue will be based on top of the wq/for-3.10
branch to take advantage of custom attrs and NUMA support for unbound
workqueues.  Mainline currently contains two commits which result in
non-trivial merge conflicts with wq/for-3.10 and, because
block/for-3.10/core is based on v3.9-rc3 which contains one of the
conflicting commits, we need a pre-merge-window merge anyway.  Let's pull
v3.9-rc5 into wq/for-3.10 so that the block tree doesn't suffer from
workqueue merge conflicts.

The two conflicts and their resolutions:

* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
  worker_pool_assign_id() to use idr_alloc() instead of the old idr
  interface.  worker_pool_assign_id() goes through multiple locking
  changes in wq/for-3.10 causing the following conflict.

    static int worker_pool_assign_id(struct worker_pool *pool)
    {
            int ret;

    <<<<<<< HEAD
            lockdep_assert_held(&wq_pool_mutex);

            do {
                    if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
                            return -ENOMEM;
                    ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
            } while (ret == -EAGAIN);
    =======
            mutex_lock(&worker_pool_idr_mutex);
            ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
            if (ret >= 0)
                    pool->id = ret;
            mutex_unlock(&worker_pool_idr_mutex);
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

            return ret < 0 ? ret : 0;
    }

  We want locking from the former and idr_alloc() usage from the latter,
  which can be combined to the following.

    static int worker_pool_assign_id(struct worker_pool *pool)
    {
            int ret;

            lockdep_assert_held(&wq_pool_mutex);

            ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
            if (ret >= 0) {
                    pool->id = ret;
                    return 0;
            }
            return ret;
    }

* eb2834285c ("workqueue: fix possible pool stall bug in wq_unbind_fn()")
  updated wq_unbind_fn() such that it has a single larger
  for_each_std_worker_pool() loop instead of two separate loops with a
  schedule() call in between.  wq/for-3.10 renamed pool->assoc_mutex to
  pool->manager_mutex causing the following conflict (earlier function
  body and comments omitted for brevity).

    static void wq_unbind_fn(struct work_struct *work)
    {
    ...
                    spin_unlock_irq(&pool->lock);
    <<<<<<< HEAD
                    mutex_unlock(&pool->manager_mutex);
            }
    =======
                    mutex_unlock(&pool->assoc_mutex);
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

            schedule();

    <<<<<<< HEAD
            for_each_cpu_worker_pool(pool, cpu)
    =======
    >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
                    atomic_set(&pool->nr_running, 0);

                    spin_lock_irq(&pool->lock);
                    wake_up_worker(pool);
                    spin_unlock_irq(&pool->lock);
            }
    }

  The resolution is mostly trivial.  We want the control flow of the
  latter with the rename of the former.

    static void wq_unbind_fn(struct work_struct *work)
    {
    ...
                    spin_unlock_irq(&pool->lock);
                    mutex_unlock(&pool->manager_mutex);

            schedule();

                    atomic_set(&pool->nr_running, 0);

                    spin_lock_irq(&pool->lock);
                    wake_up_worker(pool);
                    spin_unlock_irq(&pool->lock);
            }
    }

Signed-off-by: Tejun Heo <tj@kernel.org>
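The first conflict boils down to the v3.9 idr API conversion: callers of the old
two-step idr_pre_get()/idr_get_new() interface were switched to the single-call
idr_alloc().  As a rough illustration (a generic caller with placeholder names,
not code from this merge), the before/after pattern looks like this:

    /* Old interface: preload, then retry idr_get_new() until it stops
     * returning -EAGAIN.  my_idr, ptr and id are placeholders. */
    static int assign_id_old(struct idr *my_idr, void *ptr, int *id)
    {
            int ret;

            do {
                    if (!idr_pre_get(my_idr, GFP_KERNEL))
                            return -ENOMEM;
                    ret = idr_get_new(my_idr, ptr, id);
            } while (ret == -EAGAIN);

            return ret;
    }

    /* New interface: one idr_alloc() call returns the allocated ID or a
     * negative errno, so the preload/retry loop disappears. */
    static int assign_id_new(struct idr *my_idr, void *ptr, int *id)
    {
            int ret = idr_alloc(my_idr, ptr, 0, 0, GFP_KERNEL);

            if (ret < 0)
                    return ret;
            *id = ret;
            return 0;
    }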
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--    kernel/trace/trace.c    86
1 file changed, 71 insertions(+), 15 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c2e2c2310374..4f1dade56981 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
- struct ring_buffer *buf = tr->buffer;
+ struct ring_buffer *buf;
if (trace_stop_count)
return;
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
arch_spin_lock(&ftrace_max_lock);
+ buf = tr->buffer;
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
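These two hunks move the read of tr->buffer in update_max_tr(): instead of being
sampled at function entry, the pointer is now read only after ftrace_max_lock is
taken, so the read and the swap with max_tr.buffer happen under the same lock and
a concurrent swap cannot leave a stale pointer in either slot.  A rough userspace
analogue of the swap-under-lock pattern (placeholder names, a pthread spinlock
standing in for arch_spin_lock):

    #include <pthread.h>

    struct buffers {
            pthread_spinlock_t lock;
            void *live;     /* stands in for tr->buffer    */
            void *max;      /* stands in for max_tr.buffer */
    };

    static void swap_live_and_max(struct buffers *b)
    {
            void *buf;

            pthread_spin_lock(&b->lock);
            buf = b->live;          /* read under the lock ...          */
            b->live = b->max;       /* ... and swap under the same lock */
            b->max = buf;
            pthread_spin_unlock(&b->lock);
    }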
@@ -2400,6 +2401,27 @@ static void test_ftrace_alive(struct seq_file *m)
seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
}
+#ifdef CONFIG_TRACER_MAX_TRACE
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+ if (iter->trace->allocated_snapshot)
+ seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+ else
+ seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+ seq_printf(m, "# Snapshot commands:\n");
+ seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
+ seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+ seq_printf(m, "# Takes a snapshot of the main buffer.\n");
+ seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+ seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
+ seq_printf(m, "# is not a '0' or '1')\n");
+}
+#else
+/* Should never be called */
+static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
+#endif
+
static int s_show(struct seq_file *m, void *v)
{
struct trace_iterator *iter = v;
@@ -2411,7 +2433,9 @@ static int s_show(struct seq_file *m, void *v)
seq_puts(m, "#\n");
test_ftrace_alive(m);
}
- if (iter->trace && iter->trace->print_header)
+ if (iter->snapshot && trace_empty(iter))
+ print_snapshot_help(m, iter);
+ else if (iter->trace && iter->trace->print_header)
iter->trace->print_header(m);
else
trace_default_header(m);
@@ -2857,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
return -EINVAL;
}
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+ if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+ return -1;
+
+ return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
- return;
+ return 0;
+
+ /* Give the tracer a chance to approve the change */
+ if (current_trace->flag_changed)
+ if (current_trace->flag_changed(current_trace, mask, !!enabled))
+ return -EINVAL;
if (enabled)
trace_flags |= mask;
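This hunk adds the flag-veto hook: set_tracer_flag() now asks the current
tracer's flag_changed() callback before flipping a flag, and trace_keep_overwrite()
is a ready-made callback that refuses to clear TRACE_ITER_OVERWRITE while the
tracer is enabled.  A self-contained userspace sketch of the same callback-veto
pattern (placeholder names and types, not kernel code):

    #include <errno.h>
    #include <stdio.h>

    #define OPT_OVERWRITE (1u << 0)

    struct mini_tracer {
            int enabled;
            /* return nonzero to refuse the requested change */
            int (*flag_changed)(struct mini_tracer *t, unsigned mask, int set);
    };

    static unsigned option_flags = OPT_OVERWRITE;

    /* mirrors trace_keep_overwrite(): refuse clearing OVERWRITE while enabled */
    static int keep_overwrite(struct mini_tracer *t, unsigned mask, int set)
    {
            if (t->enabled && (mask & OPT_OVERWRITE) && !set)
                    return -1;
            return 0;
    }

    static int set_flag(struct mini_tracer *cur, unsigned mask, int set)
    {
            if (!!(option_flags & mask) == !!set)
                    return 0;               /* already in requested state */

            /* give the active "tracer" a chance to veto the change */
            if (cur->flag_changed && cur->flag_changed(cur, mask, set))
                    return -EINVAL;

            if (set)
                    option_flags |= mask;
            else
                    option_flags &= ~mask;
            return 0;
    }

    int main(void)
    {
            struct mini_tracer t = { .enabled = 1, .flag_changed = keep_overwrite };

            printf("%d\n", set_flag(&t, OPT_OVERWRITE, 0));  /* vetoed: -EINVAL */
            t.enabled = 0;
            printf("%d\n", set_flag(&t, OPT_OVERWRITE, 0));  /* allowed: 0 */
            return 0;
    }

With the tracer enabled, the first call is refused with -EINVAL; once enabled is
cleared, mirroring what tracing_set_tracer() does around a tracer switch further
down, the same request succeeds.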
@@ -2871,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
if (mask == TRACE_ITER_RECORD_CMD)
trace_event_enable_cmd_record(enabled);
- if (mask == TRACE_ITER_OVERWRITE)
+ if (mask == TRACE_ITER_OVERWRITE) {
ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+ }
if (mask == TRACE_ITER_PRINTK)
trace_printk_start_stop_comm(enabled);
+
+ return 0;
}
static int trace_set_options(char *option)
{
char *cmp;
int neg = 0;
- int ret = 0;
+ int ret = -ENODEV;
int i;
cmp = strstrip(option);
@@ -2892,19 +2936,20 @@ static int trace_set_options(char *option)
cmp += 2;
}
+ mutex_lock(&trace_types_lock);
+
for (i = 0; trace_options[i]; i++) {
if (strcmp(cmp, trace_options[i]) == 0) {
- set_tracer_flags(1 << i, !neg);
+ ret = set_tracer_flag(1 << i, !neg);
break;
}
}
/* If no option could be set, test the specific tracer options */
- if (!trace_options[i]) {
- mutex_lock(&trace_types_lock);
+ if (!trace_options[i])
ret = set_tracer_option(current_trace, cmp, neg);
- mutex_unlock(&trace_types_lock);
- }
+
+ mutex_unlock(&trace_types_lock);
return ret;
}
@@ -2914,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
+ int ret;
if (cnt >= sizeof(buf))
return -EINVAL;
@@ -2923,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
buf[cnt] = 0;
- trace_set_options(buf);
+ ret = trace_set_options(buf);
+ if (ret < 0)
+ return ret;
*ppos += cnt;
@@ -3227,6 +3275,9 @@ static int tracing_set_tracer(const char *buf)
goto out;
trace_branch_disable();
+
+ current_trace->enabled = false;
+
if (current_trace->reset)
current_trace->reset(tr);
@@ -3271,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
}
current_trace = t;
+ current_trace->enabled = true;
trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
@@ -4144,8 +4196,6 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
default:
if (current_trace->allocated_snapshot)
tracing_reset_online_cpus(&max_tr);
- else
- ret = -EINVAL;
break;
}
@@ -4759,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (val != 0 && val != 1)
return -EINVAL;
- set_tracer_flags(1 << index, val);
+
+ mutex_lock(&trace_types_lock);
+ ret = set_tracer_flag(1 << index, val);
+ mutex_unlock(&trace_types_lock);
+
+ if (ret < 0)
+ return ret;
*ppos += cnt;
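Combined with the trace_options_core_write() change above, a rejected option flip
now surfaces to user space as a failed write() instead of being silently ignored.
A rough usage sketch, assuming the active tracer actually wires flag_changed to
trace_keep_overwrite (which individual tracers do outside this file) and the usual
debugfs mount point:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/tracing/trace_options";
            const char opt[] = "nooverwrite";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* With this patch, a vetoed or unknown option makes write()
             * fail (e.g. EINVAL) rather than succeed silently. */
            if (write(fd, opt, strlen(opt)) < 0)
                    fprintf(stderr, "trace_options rejected '%s': %s\n",
                            opt, strerror(errno));

            close(fd);
            return 0;
    }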