Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--	kernel/trace/trace.c	1059
1 file changed, 139 insertions(+), 920 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8bd4ec08fb36..2f6fbf9e7caf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -67,7 +67,7 @@
  * insertions into the ring-buffer such as trace_printk could occurred
  * at the same time, giving false positive or negative results.
  */
-static bool __read_mostly tracing_selftest_running;
+bool __read_mostly tracing_selftest_running;
 
 /*
  * If boot-time tracing including tracers/events via kernel cmdline
@@ -83,7 +83,6 @@ void __init disable_tracing_selftest(const char *reason)
 	}
 }
 #else
-#define tracing_selftest_running	0
 #define tracing_selftest_disabled	0
 #endif
 
@@ -114,7 +113,7 @@ DEFINE_PER_CPU(bool, trace_taskinfo_save);
  * of the tracer is successful. But that is the only place that sets
  * this back to zero.
  */
-static int tracing_disabled = 1;
+int tracing_disabled = 1;
 
 cpumask_var_t __read_mostly	tracing_buffer_mask;
 
@@ -535,22 +534,11 @@ static struct trace_array global_trace = {
 	.trace_flags = TRACE_DEFAULT_FLAGS,
 };
 
-static struct trace_array *printk_trace = &global_trace;
+struct trace_array *printk_trace = &global_trace;
 
 /* List of trace_arrays interested in the top level trace_marker */
 static LIST_HEAD(marker_copies);
 
-static __always_inline bool printk_binsafe(struct trace_array *tr)
-{
-	/*
-	 * The binary format of traceprintk can cause a crash if used
-	 * by a buffer from another boot. Force the use of the
-	 * non binary version of trace_printk if the trace_printk
-	 * buffer is a boot mapped ring buffer.
-	 */
-	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
-}
-
 static void update_printk_trace(struct trace_array *tr)
 {
 	if (printk_trace == tr)
@@ -649,248 +637,6 @@ int tracing_check_open_get_tr(struct trace_array *tr)
 	return 0;
 }
 
-/**
- * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
- * @filtered_pids: The list of pids to check
- * @search_pid: The PID to find in @filtered_pids
- *
- * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
- */
-bool
-trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
-{
-	return trace_pid_list_is_set(filtered_pids, search_pid);
-}
-
-/**
- * trace_ignore_this_task - should a task be ignored for tracing
- * @filtered_pids: The list of pids to check
- * @filtered_no_pids: The list of pids not to be traced
- * @task: The task that should be ignored if not filtered
- *
- * Checks if @task should be traced or not from @filtered_pids.
- * Returns true if @task should *NOT* be traced.
- * Returns false if @task should be traced.
- */
-bool
-trace_ignore_this_task(struct trace_pid_list *filtered_pids,
-		       struct trace_pid_list *filtered_no_pids,
-		       struct task_struct *task)
-{
-	/*
-	 * If filtered_no_pids is not empty, and the task's pid is listed
-	 * in filtered_no_pids, then return true.
-	 * Otherwise, if filtered_pids is empty, that means we can
-	 * trace all tasks. If it has content, then only trace pids
-	 * within filtered_pids.
-	 */
-
-	return (filtered_pids &&
-		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
-		(filtered_no_pids &&
-		 trace_find_filtered_pid(filtered_no_pids, task->pid));
-}
-
-/**
- * trace_filter_add_remove_task - Add or remove a task from a pid_list
- * @pid_list: The list to modify
- * @self: The current task for fork or NULL for exit
- * @task: The task to add or remove
- *
- * If adding a task, if @self is defined, the task is only added if @self
- * is also included in @pid_list. This happens on fork and tasks should
- * only be added when the parent is listed. If @self is NULL, then the
- * @task pid will be removed from the list, which would happen on exit
- * of a task.
- */
-void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
-				  struct task_struct *self,
-				  struct task_struct *task)
-{
-	if (!pid_list)
-		return;
-
-	/* For forks, we only add if the forking task is listed */
-	if (self) {
-		if (!trace_find_filtered_pid(pid_list, self->pid))
-			return;
-	}
-
-	/* "self" is set for forks, and NULL for exits */
-	if (self)
-		trace_pid_list_set(pid_list, task->pid);
-	else
-		trace_pid_list_clear(pid_list, task->pid);
-}
-
-/**
- * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
- * @pid_list: The pid list to show
- * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
- * @pos: The position of the file
- *
- * This is used by the seq_file "next" operation to iterate the pids
- * listed in a trace_pid_list structure.
- *
- * Returns the pid+1 as we want to display pid of zero, but NULL would
- * stop the iteration.
- */
-void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
-{
-	long pid = (unsigned long)v;
-	unsigned int next;
-
-	(*pos)++;
-
-	/* pid already is +1 of the actual previous bit */
-	if (trace_pid_list_next(pid_list, pid, &next) < 0)
-		return NULL;
-
-	pid = next;
-
-	/* Return pid + 1 to allow zero to be represented */
-	return (void *)(pid + 1);
-}
-
-/**
- * trace_pid_start - Used for seq_file to start reading pid lists
- * @pid_list: The pid list to show
- * @pos: The position of the file
- *
- * This is used by seq_file "start" operation to start the iteration
- * of listing pids.
- *
- * Returns the pid+1 as we want to display pid of zero, but NULL would
- * stop the iteration.
- */
-void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
-{
-	unsigned long pid;
-	unsigned int first;
-	loff_t l = 0;
-
-	if (trace_pid_list_first(pid_list, &first) < 0)
-		return NULL;
-
-	pid = first;
-
-	/* Return pid + 1 so that zero can be the exit value */
-	for (pid++; pid && l < *pos;
-	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
-		;
-	return (void *)pid;
-}
-
-/**
- * trace_pid_show - show the current pid in seq_file processing
- * @m: The seq_file structure to write into
- * @v: A void pointer of the pid (+1) value to display
- *
- * Can be directly used by seq_file operations to display the current
- * pid value.
- */
-int trace_pid_show(struct seq_file *m, void *v)
-{
-	unsigned long pid = (unsigned long)v - 1;
-
-	seq_printf(m, "%lu\n", pid);
-	return 0;
-}
-
-/* 128 should be much more than enough */
-#define PID_BUF_SIZE		127
-
-int trace_pid_write(struct trace_pid_list *filtered_pids,
-		    struct trace_pid_list **new_pid_list,
-		    const char __user *ubuf, size_t cnt)
-{
-	struct trace_pid_list *pid_list;
-	struct trace_parser parser;
-	unsigned long val;
-	int nr_pids = 0;
-	ssize_t read = 0;
-	ssize_t ret;
-	loff_t pos;
-	pid_t pid;
-
-	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
-		return -ENOMEM;
-
-	/*
-	 * Always recreate a new array. The write is an all or nothing
-	 * operation. Always create a new array when adding new pids by
-	 * the user. If the operation fails, then the current list is
-	 * not modified.
-	 */
-	pid_list = trace_pid_list_alloc();
-	if (!pid_list) {
-		trace_parser_put(&parser);
-		return -ENOMEM;
-	}
-
-	if (filtered_pids) {
-		/* copy the current bits to the new max */
-		ret = trace_pid_list_first(filtered_pids, &pid);
-		while (!ret) {
-			ret = trace_pid_list_set(pid_list, pid);
-			if (ret < 0)
-				goto out;
-
-			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
-			nr_pids++;
-		}
-	}
-
-	ret = 0;
-	while (cnt > 0) {
-
-		pos = 0;
-
-		ret = trace_get_user(&parser, ubuf, cnt, &pos);
-		if (ret < 0)
-			break;
-
-		read += ret;
-		ubuf += ret;
-		cnt -= ret;
-
-		if (!trace_parser_loaded(&parser))
-			break;
-
-		ret = -EINVAL;
-		if (kstrtoul(parser.buffer, 0, &val))
-			break;
-
-		pid = (pid_t)val;
-
-		if (trace_pid_list_set(pid_list, pid) < 0) {
-			ret = -1;
-			break;
-		}
-		nr_pids++;
-
-		trace_parser_clear(&parser);
-		ret = 0;
-	}
- out:
-	trace_parser_put(&parser);
-
-	if (ret < 0) {
-		trace_pid_list_free(pid_list);
-		return ret;
-	}
-
-	if (!nr_pids) {
-		/* Cleared the list of pids */
-		trace_pid_list_free(pid_list);
-		pid_list = NULL;
-	}
-
-	*new_pid_list = pid_list;
-
-	return read;
-}
-
 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
 {
 	u64 ts;
@@ -1033,56 +779,6 @@ static inline void trace_access_lock_init(void)
 
 #endif
 
-#ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct trace_array *tr,
-				 struct trace_buffer *buffer,
-				 unsigned int trace_ctx,
-				 int skip, struct pt_regs *regs);
-static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct trace_buffer *buffer,
-				      unsigned int trace_ctx,
-				      int skip, struct pt_regs *regs);
-
-#else
-static inline void __ftrace_trace_stack(struct trace_array *tr,
-					struct trace_buffer *buffer,
-					unsigned int trace_ctx,
-					int skip, struct pt_regs *regs)
-{
-}
-static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct trace_buffer *buffer,
-				      unsigned long trace_ctx,
-				      int skip, struct pt_regs *regs)
-{
-}
-
-#endif
-
-static __always_inline void
-trace_event_setup(struct ring_buffer_event *event,
-		  int type, unsigned int trace_ctx)
-{
-	struct trace_entry *ent = ring_buffer_event_data(event);
-
-	tracing_generic_entry_update(ent, type, trace_ctx);
-}
-
-static __always_inline struct ring_buffer_event *
-__trace_buffer_lock_reserve(struct trace_buffer *buffer,
-			    int type,
-			    unsigned long len,
-			    unsigned int trace_ctx)
-{
-	struct ring_buffer_event *event;
-
-	event = ring_buffer_lock_reserve(buffer, len);
-	if (event != NULL)
-		trace_event_setup(event, type, trace_ctx);
-
-	return event;
-}
-
 void tracer_tracing_on(struct trace_array *tr)
 {
 	if (tr->array_buffer.buffer)
@@ -1110,130 +806,10 @@ void tracing_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
-
-static __always_inline void
-__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
-{
-	__this_cpu_write(trace_taskinfo_save, true);
-
-	/* If this is the temp buffer, we need to commit fully */
-	if (this_cpu_read(trace_buffered_event) == event) {
-		/* Length is in event->array[0] */
-		ring_buffer_write(buffer, event->array[0], &event->array[1]);
-		/* Release the temp buffer */
-		this_cpu_dec(trace_buffered_event_cnt);
-		/* ring_buffer_unlock_commit() enables preemption */
-		preempt_enable_notrace();
-	} else
-		ring_buffer_unlock_commit(buffer);
-}
-
-int __trace_array_puts(struct trace_array *tr, unsigned long ip,
-		       const char *str, int size)
-{
-	struct ring_buffer_event *event;
-	struct trace_buffer *buffer;
-	struct print_entry *entry;
-	unsigned int trace_ctx;
-	int alloc;
-
-	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
-		return 0;
-
-	if (unlikely(tracing_selftest_running && tr == &global_trace))
-		return 0;
-
-	if (unlikely(tracing_disabled))
-		return 0;
-
-	alloc = sizeof(*entry) + size + 2; /* possible \n added */
-
-	trace_ctx = tracing_gen_ctx();
-	buffer = tr->array_buffer.buffer;
-	guard(ring_buffer_nest)(buffer);
-	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
-					    trace_ctx);
-	if (!event)
-		return 0;
-
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-
-	memcpy(&entry->buf, str, size);
-
-	/* Add a newline if necessary */
-	if (entry->buf[size - 1] != '\n') {
-		entry->buf[size] = '\n';
-		entry->buf[size + 1] = '\0';
-	} else
-		entry->buf[size] = '\0';
-
-	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
-	return size;
-}
-EXPORT_SYMBOL_GPL(__trace_array_puts);
-
-/**
- * __trace_puts - write a constant string into the trace buffer.
- * @ip:	   The address of the caller
- * @str:   The constant string to write
- * @size:  The size of the string.
- */
-int __trace_puts(unsigned long ip, const char *str, int size)
-{
-	return __trace_array_puts(printk_trace, ip, str, size);
-}
-EXPORT_SYMBOL_GPL(__trace_puts);
-
-/**
- * __trace_bputs - write the pointer to a constant string into trace buffer
- * @ip:	   The address of the caller
- * @str:   The constant string to write to the buffer to
- */
-int __trace_bputs(unsigned long ip, const char *str)
-{
-	struct trace_array *tr = READ_ONCE(printk_trace);
-	struct ring_buffer_event *event;
-	struct trace_buffer *buffer;
-	struct bputs_entry *entry;
-	unsigned int trace_ctx;
-	int size = sizeof(struct bputs_entry);
-
-	if (!printk_binsafe(tr))
-		return __trace_puts(ip, str, strlen(str));
-
-	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
-		return 0;
-
-	if (unlikely(tracing_selftest_running || tracing_disabled))
-		return 0;
-
-	trace_ctx = tracing_gen_ctx();
-	buffer = tr->array_buffer.buffer;
-
-	guard(ring_buffer_nest)(buffer);
-	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-					    trace_ctx);
-	if (!event)
-		return 0;
-
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-	entry->str = str;
-
-	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
-
-	return 1;
-}
-EXPORT_SYMBOL_GPL(__trace_bputs);
-
 #ifdef CONFIG_TRACER_SNAPSHOT
 static void tracing_snapshot_instance_cond(struct trace_array *tr,
 					   void *cond_data)
 {
-	struct tracer *tracer = tr->current_trace;
 	unsigned long flags;
 
 	if (in_nmi()) {
@@ -1249,15 +825,15 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
 		return;
 	}
 
-	/* Note, snapshot can not be used when the tracer uses it */
-	if (tracer->use_max_tr) {
-		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
+	if (tr->mapped) {
+		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
 		return;
 	}
 
-	if (tr->mapped) {
-		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
+	/* Note, snapshot can not be used when the tracer uses it */
+	if (tracer_uses_snapshot(tr->current_trace)) {
+		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
 		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
 		return;
 	}
@@ -1357,12 +933,12 @@ int tracing_alloc_snapshot_instance(struct trace_array *tr)
 
 		/* Make the snapshot buffer have the same order as main buffer */
 		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
-		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+		ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order);
 		if (ret < 0)
 			return ret;
 
 		/* allocate spare buffer */
-		ret = resize_buffer_duplicate_size(&tr->max_buffer,
+		ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
 				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
 		if (ret < 0)
 			return ret;
@@ -1380,10 +956,10 @@ static void free_snapshot(struct trace_array *tr)
 	 * The max_tr ring buffer has some state (e.g. ring->clock) and
 	 * we want preserve it.
 	 */
-	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
-	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
-	set_buffer_entries(&tr->max_buffer, 1);
-	tracing_reset_online_cpus(&tr->max_buffer);
+	ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, 0);
+	ring_buffer_resize(tr->snapshot_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
+	set_buffer_entries(&tr->snapshot_buffer, 1);
+	tracing_reset_online_cpus(&tr->snapshot_buffer);
 	tr->allocated_snapshot = false;
 }
 
@@ -1499,7 +1075,7 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
 
 	guard(mutex)(&trace_types_lock);
 
-	if (tr->current_trace->use_max_tr)
+	if (tracer_uses_snapshot(tr->current_trace))
 		return -EBUSY;
 
 	/*
@@ -1666,9 +1242,18 @@ EXPORT_SYMBOL_GPL(tracing_off);
 
 void disable_trace_on_warning(void)
 {
 	if (__disable_trace_on_warning) {
+		struct trace_array *tr = READ_ONCE(printk_trace);
+
 		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
 			"Disabling tracing due to warning\n");
 		tracing_off();
+
+		/* Disable trace_printk() buffer too */
+		if (tr != &global_trace) {
+			trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_,
+					       "Disabling tracing due to warning\n");
+			tracer_tracing_off(tr);
+		}
 	}
 }
 
@@ -1903,10 +1488,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 unsigned long __read_mostly	tracing_thresh;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-static const struct file_operations tracing_max_lat_fops;
-
 #ifdef LATENCY_FS_NOTIFY
-
 static struct workqueue_struct *fsnotify_wq;
 
 static void latency_fsnotify_workfn(struct work_struct *work)
@@ -1923,17 +1505,6 @@ static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
 	queue_work(fsnotify_wq, &tr->fsnotify_work);
 }
 
-static void trace_create_maxlat_file(struct trace_array *tr,
-				     struct dentry *d_tracer)
-{
-	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
-	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
-	tr->d_max_latency = trace_create_file("tracing_max_latency",
-					      TRACE_MODE_WRITE,
-					      d_tracer, tr,
-					      &tracing_max_lat_fops);
-}
-
 __init static int latency_fsnotify_init(void)
 {
 	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
@@ -1958,14 +1529,22 @@ void latency_fsnotify(struct trace_array *tr)
 	 */
 	irq_work_queue(&tr->fsnotify_irqwork);
 }
+#endif /* !LATENCY_FS_NOTIFY */
 
-#else /* !LATENCY_FS_NOTIFY */
-
-#define trace_create_maxlat_file(tr, d_tracer)				\
-	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
-			  d_tracer, tr, &tracing_max_lat_fops)
+static const struct file_operations tracing_max_lat_fops;
+static void trace_create_maxlat_file(struct trace_array *tr,
+				     struct dentry *d_tracer)
+{
+#ifdef LATENCY_FS_NOTIFY
+	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
+	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
 #endif
+	tr->d_max_latency = trace_create_file("tracing_max_latency",
+					      TRACE_MODE_WRITE,
+					      d_tracer, tr,
+					      &tracing_max_lat_fops);
+}
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -1976,8 +1555,8 @@ static void __update_max_tr(struct trace_array *tr,
 			    struct task_struct *tsk, int cpu)
 {
 	struct array_buffer *trace_buf = &tr->array_buffer;
-	struct array_buffer *max_buf = &tr->max_buffer;
 	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
+	struct array_buffer *max_buf = &tr->snapshot_buffer;
 	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 
 	max_buf->cpu = cpu;
@@ -2006,7 +1585,14 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	tracing_record_cmdline(tsk);
 	latency_fsnotify(tr);
 }
+#else
+static inline void trace_create_maxlat_file(struct trace_array *tr,
+					    struct dentry *d_tracer) { }
+static inline void __update_max_tr(struct trace_array *tr,
+				   struct task_struct *tsk, int cpu) { }
+#endif /* CONFIG_TRACER_MAX_TRACE */
 
+#ifdef CONFIG_TRACER_SNAPSHOT
 /**
  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
  * @tr: tracer
@@ -2036,17 +1622,16 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 
 	/* Inherit the recordable setting from array_buffer */
 	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
-		ring_buffer_record_on(tr->max_buffer.buffer);
+		ring_buffer_record_on(tr->snapshot_buffer.buffer);
 	else
-		ring_buffer_record_off(tr->max_buffer.buffer);
+		ring_buffer_record_off(tr->snapshot_buffer.buffer);
 
-#ifdef CONFIG_TRACER_SNAPSHOT
 	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
 		arch_spin_unlock(&tr->max_lock);
 		return;
 	}
-#endif
-	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
+
+	swap(tr->array_buffer.buffer, tr->snapshot_buffer.buffer);
 
 	__update_max_tr(tr, tsk, cpu);
 
@@ -2081,7 +1666,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&tr->max_lock);
 
-	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
+	ret = ring_buffer_swap_cpu(tr->snapshot_buffer.buffer, tr->array_buffer.buffer, cpu);
 
 	if (ret == -EBUSY) {
 		/*
@@ -2091,7 +1676,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		 * and flag that it failed.
 		 * Another reason is resize is in progress.
 		 */
-		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
+		trace_array_printk_buf(tr->snapshot_buffer.buffer, _THIS_IP_,
 			"Failed to swap buffers due to commit or resize in progress\n");
 	}
 
@@ -2100,8 +1685,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	__update_max_tr(tr, tsk, cpu);
 	arch_spin_unlock(&tr->max_lock);
 }
-
-#endif /* CONFIG_TRACER_MAX_TRACE */
+#endif /* CONFIG_TRACER_SNAPSHOT */
 
 struct pipe_wait {
 	struct trace_iterator	*iter;
@@ -2134,13 +1718,13 @@ static int wait_on_pipe(struct trace_iterator *iter, int full)
 	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
 			       full, wait_pipe_cond, &pwait);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	/*
 	 * Make sure this is still the snapshot buffer, as if a snapshot were
 	 * to happen, this would now be the main buffer.
 	 */
 	if (iter->snapshot)
-		iter->array_buffer = &iter->tr->max_buffer;
+		iter->array_buffer = &iter->tr->snapshot_buffer;
 #endif
 	return ret;
 }
@@ -2205,10 +1789,10 @@ static int run_tracer_selftest(struct tracer *type)
 	tr->current_trace_flags = type->flags ? : type->default_flags;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (type->use_max_tr) {
+	if (tracer_uses_snapshot(type)) {
 		/* If we expanded the buffers, make sure the max is expanded too */
 		if (tr->ring_buffer_expanded)
-			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
+			ring_buffer_resize(tr->snapshot_buffer.buffer, trace_buf_size,
 					   RING_BUFFER_ALL_CPUS);
 		tr->allocated_snapshot = true;
 	}
@@ -2230,12 +1814,12 @@ static int run_tracer_selftest(struct tracer *type)
 	tracing_reset_online_cpus(&tr->array_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (type->use_max_tr) {
+	if (tracer_uses_snapshot(type)) {
 		tr->allocated_snapshot = false;
 
 		/* Shrink the max buffer again */
 		if (tr->ring_buffer_expanded)
-			ring_buffer_resize(tr->max_buffer.buffer, 1,
+			ring_buffer_resize(tr->snapshot_buffer.buffer, 1,
 					   RING_BUFFER_ALL_CPUS);
 	}
 #endif
@@ -2477,8 +2061,8 @@ void tracing_reset_all_online_cpus_unlocked(void)
 			continue;
 		tr->clear_trace = false;
 		tracing_reset_online_cpus(&tr->array_buffer);
-#ifdef CONFIG_TRACER_MAX_TRACE
-		tracing_reset_online_cpus(&tr->max_buffer);
+#ifdef CONFIG_TRACER_SNAPSHOT
+		tracing_reset_online_cpus(&tr->snapshot_buffer);
 #endif
 	}
 }
@@ -2517,8 +2101,8 @@ static void tracing_start_tr(struct trace_array *tr)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = tr->max_buffer.buffer;
+#ifdef CONFIG_TRACER_SNAPSHOT
+	buffer = tr->snapshot_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 #endif
@@ -2553,8 +2137,8 @@ static void tracing_stop_tr(struct trace_array *tr)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = tr->max_buffer.buffer;
+#ifdef CONFIG_TRACER_SNAPSHOT
+	buffer = tr->snapshot_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 #endif
@@ -3002,10 +2586,10 @@ struct ftrace_stacks {
 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
-static void __ftrace_trace_stack(struct trace_array *tr,
-				 struct trace_buffer *buffer,
-				 unsigned int trace_ctx,
-				 int skip, struct pt_regs *regs)
+void __ftrace_trace_stack(struct trace_array *tr,
+			  struct trace_buffer *buffer,
+			  unsigned int trace_ctx,
+			  int skip, struct pt_regs *regs)
 {
 	struct ring_buffer_event *event;
 	unsigned int size, nr_entries;
@@ -3088,17 +2672,6 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 	trace_clear_recursion(bit);
 }
 
-static inline void ftrace_trace_stack(struct trace_array *tr,
-				      struct trace_buffer *buffer,
-				      unsigned int trace_ctx,
-				      int skip, struct pt_regs *regs)
-{
-	if (!(tr->trace_flags & TRACE_ITER(STACKTRACE)))
-		return;
-
-	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
-}
-
 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
 		   int skip)
 {
@@ -3233,324 +2806,6 @@ void trace_last_func_repeats(struct trace_array *tr,
 	__buffer_unlock_commit(buffer, event);
 }
 
-/* created for use with alloc_percpu */
-struct trace_buffer_struct {
-	int nesting;
-	char buffer[4][TRACE_BUF_SIZE];
-};
-
-static struct trace_buffer_struct __percpu *trace_percpu_buffer;
-
-/*
- * This allows for lockless recording. If we're nested too deeply, then
- * this returns NULL.
- */
-static char *get_trace_buf(void)
-{
-	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
-
-	if (!trace_percpu_buffer || buffer->nesting >= 4)
-		return NULL;
-
-	buffer->nesting++;
-
-	/* Interrupts must see nesting incremented before we use the buffer */
-	barrier();
-	return &buffer->buffer[buffer->nesting - 1][0];
-}
-
-static void put_trace_buf(void)
-{
-	/* Don't let the decrement of nesting leak before this */
-	barrier();
-	this_cpu_dec(trace_percpu_buffer->nesting);
-}
-
-static int alloc_percpu_trace_buffer(void)
-{
-	struct trace_buffer_struct __percpu *buffers;
-
-	if (trace_percpu_buffer)
-		return 0;
-
-	buffers = alloc_percpu(struct trace_buffer_struct);
-	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
-		return -ENOMEM;
-
-	trace_percpu_buffer = buffers;
-	return 0;
-}
-
-static int buffers_allocated;
-
-void trace_printk_init_buffers(void)
-{
-	if (buffers_allocated)
-		return;
-
-	if (alloc_percpu_trace_buffer())
-		return;
-
-	/* trace_printk() is for debug use only. Don't use it in production. */
-
-	pr_warn("\n");
-	pr_warn("**********************************************************\n");
-	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
-	pr_warn("**                                                      **\n");
-	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
-	pr_warn("**                                                      **\n");
-	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
-	pr_warn("** unsafe for production use.                           **\n");
-	pr_warn("**                                                      **\n");
-	pr_warn("** If you see this message and you are not debugging    **\n");
-	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
-	pr_warn("**                                                      **\n");
-	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
-	pr_warn("**********************************************************\n");
-
-	/* Expand the buffers to set size */
-	tracing_update_buffers(&global_trace);
-
-	buffers_allocated = 1;
-
-	/*
-	 * trace_printk_init_buffers() can be called by modules.
-	 * If that happens, then we need to start cmdline recording
-	 * directly here. If the global_trace.buffer is already
-	 * allocated here, then this was called by module code.
-	 */
-	if (global_trace.array_buffer.buffer)
-		tracing_start_cmdline_record();
-}
-EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
-
-void trace_printk_start_comm(void)
-{
-	/* Start tracing comms if trace printk is set */
-	if (!buffers_allocated)
-		return;
-	tracing_start_cmdline_record();
-}
-
-static void trace_printk_start_stop_comm(int enabled)
-{
-	if (!buffers_allocated)
-		return;
-
-	if (enabled)
-		tracing_start_cmdline_record();
-	else
-		tracing_stop_cmdline_record();
-}
-
-/**
- * trace_vbprintk - write binary msg to tracing buffer
- * @ip:    The address of the caller
- * @fmt:   The string format to write to the buffer
- * @args:  Arguments for @fmt
- */
-int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
-{
-	struct ring_buffer_event *event;
-	struct trace_buffer *buffer;
-	struct trace_array *tr = READ_ONCE(printk_trace);
-	struct bprint_entry *entry;
-	unsigned int trace_ctx;
-	char *tbuffer;
-	int len = 0, size;
-
-	if (!printk_binsafe(tr))
-		return trace_vprintk(ip, fmt, args);
-
-	if (unlikely(tracing_selftest_running || tracing_disabled))
-		return 0;
-
-	/* Don't pollute graph traces with trace_vprintk internals */
-	pause_graph_tracing();
-
-	trace_ctx = tracing_gen_ctx();
-	guard(preempt_notrace)();
-
-	tbuffer = get_trace_buf();
-	if (!tbuffer) {
-		len = 0;
-		goto out_nobuffer;
-	}
-
-	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
-
-	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
-		goto out_put;
-
-	size = sizeof(*entry) + sizeof(u32) * len;
-	buffer = tr->array_buffer.buffer;
-	scoped_guard(ring_buffer_nest, buffer) {
-		event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-						    trace_ctx);
-		if (!event)
-			goto out_put;
-		entry = ring_buffer_event_data(event);
-		entry->ip = ip;
-		entry->fmt = fmt;
-
-		memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
-	}
-out_put:
-	put_trace_buf();
-
-out_nobuffer:
-	unpause_graph_tracing();
-
-	return len;
-}
-EXPORT_SYMBOL_GPL(trace_vbprintk);
-
-static __printf(3, 0)
-int __trace_array_vprintk(struct trace_buffer *buffer,
-			  unsigned long ip, const char *fmt, va_list args)
-{
-	struct ring_buffer_event *event;
-	int len = 0, size;
-	struct print_entry *entry;
-	unsigned int trace_ctx;
-	char *tbuffer;
-
-	if (tracing_disabled)
-		return 0;
-
-	/* Don't pollute graph traces with trace_vprintk internals */
-	pause_graph_tracing();
-
-	trace_ctx = tracing_gen_ctx();
-	guard(preempt_notrace)();
-
-
-	tbuffer = get_trace_buf();
-	if (!tbuffer) {
-		len = 0;
-		goto out_nobuffer;
-	}
-
-	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
-
-	size = sizeof(*entry) + len + 1;
-	scoped_guard(ring_buffer_nest, buffer) {
-		event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-						    trace_ctx);
-		if (!event)
-			goto out;
-		entry = ring_buffer_event_data(event);
-		entry->ip = ip;
-
-		memcpy(&entry->buf, tbuffer, len + 1);
-		__buffer_unlock_commit(buffer, event);
-		ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
-	}
-out:
-	put_trace_buf();
-
-out_nobuffer:
-	unpause_graph_tracing();
-
-	return len;
-}
-
-int trace_array_vprintk(struct trace_array *tr,
-			unsigned long ip, const char *fmt, va_list args)
-{
-	if (tracing_selftest_running && tr == &global_trace)
-		return 0;
-
-	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
-}
-
-/**
- * trace_array_printk - Print a message to a specific instance
- * @tr: The instance trace_array descriptor
- * @ip: The instruction pointer that this is called from.
- * @fmt: The format to print (printf format)
- *
- * If a subsystem sets up its own instance, they have the right to
- * printk strings into their tracing instance buffer using this
- * function. Note, this function will not write into the top level
- * buffer (use trace_printk() for that), as writing into the top level
- * buffer should only have events that can be individually disabled.
- * trace_printk() is only used for debugging a kernel, and should not
- * be ever incorporated in normal use.
- *
- * trace_array_printk() can be used, as it will not add noise to the
- * top level tracing buffer.
- *
- * Note, trace_array_init_printk() must be called on @tr before this
- * can be used.
- */
-int trace_array_printk(struct trace_array *tr,
-		       unsigned long ip, const char *fmt, ...)
-{
-	int ret;
-	va_list ap;
-
-	if (!tr)
-		return -ENOENT;
-
-	/* This is only allowed for created instances */
-	if (tr == &global_trace)
-		return 0;
-
-	if (!(tr->trace_flags & TRACE_ITER(PRINTK)))
-		return 0;
-
-	va_start(ap, fmt);
-	ret = trace_array_vprintk(tr, ip, fmt, ap);
-	va_end(ap);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(trace_array_printk);
-
-/**
- * trace_array_init_printk - Initialize buffers for trace_array_printk()
- * @tr: The trace array to initialize the buffers for
- *
- * As trace_array_printk() only writes into instances, they are OK to
- * have in the kernel (unlike trace_printk()). This needs to be called
- * before trace_array_printk() can be used on a trace_array.
- */
-int trace_array_init_printk(struct trace_array *tr)
-{
-	if (!tr)
-		return -ENOENT;
-
-	/* This is only allowed for created instances */
-	if (tr == &global_trace)
-		return -EINVAL;
-
-	return alloc_percpu_trace_buffer();
-}
-EXPORT_SYMBOL_GPL(trace_array_init_printk);
-
-int trace_array_printk_buf(struct trace_buffer *buffer,
-			   unsigned long ip, const char *fmt, ...)
-{
-	int ret;
-	va_list ap;
-
-	if (!(printk_trace->trace_flags & TRACE_ITER(PRINTK)))
-		return 0;
-
-	va_start(ap, fmt);
-	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
-	va_end(ap);
-	return ret;
-}
-
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
-{
-	return trace_array_vprintk(printk_trace, ip, fmt, args);
-}
-EXPORT_SYMBOL_GPL(trace_vprintk);
-
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
@@ -3987,10 +3242,8 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	}
 	mutex_unlock(&trace_types_lock);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->trace->use_max_tr)
+	if (iter->snapshot && tracer_uses_snapshot(iter->trace))
 		return ERR_PTR(-EBUSY);
-#endif
 
 	if (*pos != iter->pos) {
 		iter->ent = NULL;
@@ -4029,10 +3282,8 @@ static void s_stop(struct seq_file *m, void *p)
 {
 	struct trace_iterator *iter = m->private;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->trace->use_max_tr)
+	if (iter->snapshot && tracer_uses_snapshot(iter->trace))
 		return;
-#endif
 
 	trace_access_unlock(iter->cpu_file);
 	trace_event_read_unlock();
@@ -4286,7 +3537,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 		/* ftrace and system call events are still OK */
 		if ((event->type > __TRACE_LAST_TYPE) &&
 		    !is_syscall_event(event))
-				return print_event_fields(iter, event);
+			return print_event_fields(iter, event);
 	}
 	return event->funcs->trace(iter, sym_flags, event);
 }
@@ -4509,7 +3760,7 @@ static void test_ftrace_alive(struct seq_file *m)
 		    "#          MAY BE MISSING FUNCTION EVENTS\n");
 }
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 static void show_snapshot_main_help(struct seq_file *m)
 {
 	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
@@ -4687,10 +3938,10 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
 
 	iter->tr = tr;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	/* Currently only the top directory has a snapshot */
 	if (tr->current_trace->print_max || snapshot)
-		iter->array_buffer = &tr->max_buffer;
+		iter->array_buffer = &tr->snapshot_buffer;
 	else
 #endif
 		iter->array_buffer = &tr->array_buffer;
@@ -4759,11 +4010,6 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-bool tracing_is_disabled(void)
-{
-	return (tracing_disabled) ? true: false;
-}
-
 /*
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
@@ -4881,6 +4127,8 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
 	return single_release(inode, file);
 }
 
+static bool update_last_data_if_empty(struct trace_array *tr);
+
 static int tracing_open(struct inode *inode, struct file *file)
 {
 	struct trace_array *tr = inode->i_private;
@@ -4898,13 +4146,15 @@ static int tracing_open(struct inode *inode, struct file *file)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		if (tr->current_trace->print_max)
-			trace_buf = &tr->max_buffer;
+			trace_buf = &tr->snapshot_buffer;
 #endif
 
 		if (cpu == RING_BUFFER_ALL_CPUS)
 			tracing_reset_online_cpus(trace_buf);
 		else
 			tracing_reset_cpu(trace_buf, cpu);
+
+		update_last_data_if_empty(tr);
 	}
 
 	if (file->f_mode & FMODE_READ) {
@@ -4929,11 +4179,9 @@ static int tracing_open(struct inode *inode, struct file *file)
 static bool
 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
 {
-#ifdef CONFIG_TRACER_SNAPSHOT
 	/* arrays with mapped buffer range do not have snapshots */
-	if (tr->range_addr_start && t->use_max_tr)
+	if (tr->range_addr_start && tracer_uses_snapshot(t))
 		return false;
-#endif
 
 	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
 }
@@ -5110,15 +4358,15 @@ int tracing_set_cpumask(struct trace_array *tr,
 		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
-#ifdef CONFIG_TRACER_MAX_TRACE
-			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_SNAPSHOT
+			ring_buffer_record_disable_cpu(tr->snapshot_buffer.buffer, cpu);
 #endif
 		}
 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
-#ifdef CONFIG_TRACER_MAX_TRACE
-			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_SNAPSHOT
+			ring_buffer_record_enable_cpu(tr->snapshot_buffer.buffer, cpu);
 #endif
 		}
 	}
@@ -5327,8 +4575,8 @@ int set_tracer_flag(struct trace_array *tr, u64 mask, int enabled)
 	case TRACE_ITER(OVERWRITE):
 		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
-#ifdef CONFIG_TRACER_MAX_TRACE
-		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
+#ifdef CONFIG_TRACER_SNAPSHOT
+		ring_buffer_change_overwrite(tr->snapshot_buffer.buffer, enabled);
 #endif
 		break;
 
@@ -5971,6 +5219,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
 int tracer_init(struct tracer *t, struct trace_array *tr)
 {
 	tracing_reset_online_cpus(&tr->array_buffer);
+	update_last_data_if_empty(tr);
 	return t->init(tr);
 }
 
@@ -5991,7 +5240,7 @@ static void update_buffer_entries(struct array_buffer *buf, int cpu)
 	}
 }
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 /* resize @tr's buffer to the size of @size_tr's entries */
 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 					struct array_buffer *size_buf, int cpu_id)
@@ -6017,7 +5266,7 @@ static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 
 	return ret;
 }
-#endif /* CONFIG_TRACER_MAX_TRACE */
+#endif /* CONFIG_TRACER_SNAPSHOT */
 
 static int __tracing_resize_ring_buffer(struct trace_array *tr,
 					unsigned long size, int cpu)
@@ -6042,11 +5291,11 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 	if (ret < 0)
 		goto out_start;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	if (!tr->allocated_snapshot)
 		goto out;
 
-	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
+	ret = ring_buffer_resize(tr->snapshot_buffer.buffer, size, cpu);
 	if (ret < 0) {
 		int r = resize_buffer_duplicate_size(&tr->array_buffer,
 						     &tr->array_buffer, cpu);
@@ -6071,10 +5320,10 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 		goto out_start;
 	}
 
-	update_buffer_entries(&tr->max_buffer, cpu);
+	update_buffer_entries(&tr->snapshot_buffer, cpu);
 
  out:
-#endif /* CONFIG_TRACER_MAX_TRACE */
+#endif /* CONFIG_TRACER_SNAPSHOT */
 
 	update_buffer_entries(&tr->array_buffer, cpu);
  out_start:
@@ -6265,6 +5514,9 @@ int tracing_update_buffers(struct trace_array *tr)
 {
 	int ret = 0;
 
+	if (!tr)
+		tr = &global_trace;
+
 	guard(mutex)(&trace_types_lock);
 
 	update_last_data(tr);
@@ -6299,9 +5551,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 {
 	struct tracer *trace = NULL;
 	struct tracers *t;
-#ifdef CONFIG_TRACER_MAX_TRACE
 	bool had_max_tr;
-#endif
 	int ret;
 
 	guard(mutex)(&trace_types_lock);
@@ -6329,7 +5579,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		return 0;
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-	if (trace->use_max_tr) {
+	if (tracer_uses_snapshot(trace)) {
 		local_irq_disable();
 		arch_spin_lock(&tr->max_lock);
 		ret = tr->cond_snapshot ? -EBUSY : 0;
@@ -6361,14 +5611,13 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 	if (tr->current_trace->reset)
 		tr->current_trace->reset(tr);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	had_max_tr = tr->current_trace->use_max_tr;
+	had_max_tr = tracer_uses_snapshot(tr->current_trace);
 
 	/* Current trace needs to be nop_trace before synchronize_rcu */
 	tr->current_trace = &nop_trace;
 	tr->current_trace_flags = nop_trace.flags;
 
-	if (had_max_tr && !trace->use_max_tr) {
+	if (had_max_tr && !tracer_uses_snapshot(trace)) {
 		/*
 		 * We need to make sure that the update_max_tr sees that
 		 * current_trace changed to nop_trace to keep it from
@@ -6381,24 +5630,19 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
 		tracing_disarm_snapshot(tr);
 	}
 
-	if (!had_max_tr && trace->use_max_tr) {
+	if (!had_max_tr && tracer_uses_snapshot(trace)) {
 		ret = tracing_arm_snapshot_locked(tr);
 		if (ret)
 			return ret;
 	}
-#else
-	tr->current_trace = &nop_trace;
-#endif
 
 	tr->current_trace_flags = t->flags ? : t->tracer->flags;
 
 	if (trace->init) {
 		ret = tracer_init(trace, tr);
 		if (ret) {
-#ifdef CONFIG_TRACER_MAX_TRACE
-			if (trace->use_max_tr)
+			if (tracer_uses_snapshot(trace))
 				tracing_disarm_snapshot(tr);
-#endif
 			tr->current_trace_flags = nop_trace.flags;
 			return ret;
 		}
@@ -7603,7 +6847,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	unsigned long ip;
 	char *buf;
 
-	if (tracing_disabled)
+	if (unlikely(tracing_disabled))
 		return -EINVAL;
 
 	if (!(tr->trace_flags & TRACE_ITER(MARKERS)))
@@ -7683,7 +6927,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 	ssize_t written = -ENODEV;
 	char *buf;
 
-	if (tracing_disabled)
+	if (unlikely(tracing_disabled))
 		return -EINVAL;
 
 	if (!(tr->trace_flags & TRACE_ITER(MARKERS)))
@@ -7784,11 +7028,12 @@ int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 	 */
 	tracing_reset_online_cpus(&tr->array_buffer);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->max_buffer.buffer)
-		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
-	tracing_reset_online_cpus(&tr->max_buffer);
+#ifdef CONFIG_TRACER_SNAPSHOT
+	if (tr->snapshot_buffer.buffer)
+		ring_buffer_set_clock(tr->snapshot_buffer.buffer, trace_clocks[i].func);
+	tracing_reset_online_cpus(&tr->snapshot_buffer);
 #endif
+	update_last_data_if_empty(tr);
 
 	if (tr->scratch && !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) {
 		struct trace_scratch *tscratch = tr->scratch;
@@ -7881,26 +7126,6 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve
 	return ring_buffer_event_time_stamp(buffer, rbe);
 }
 
-/*
- * Set or disable using the per CPU trace_buffer_event when possible.
- */
-int tracing_set_filter_buffering(struct trace_array *tr, bool set)
-{
-	guard(mutex)(&trace_types_lock);
-
-	if (set && tr->no_filter_buffering_ref++)
-		return 0;
-
-	if (!set) {
-		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
-			return -EINVAL;
-
-		--tr->no_filter_buffering_ref;
-	}
-
-	return 0;
-}
-
 struct ftrace_buffer_info {
 	struct trace_iterator	iter;
 	void			*spare;
@@ -7939,7 +7164,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
 		ret = 0;
 
 		iter->tr = tr;
-		iter->array_buffer = &tr->max_buffer;
+		iter->array_buffer = &tr->snapshot_buffer;
 		iter->cpu_file = tracing_get_cpu(inode);
 		m->private = iter;
 		file->private_data = m;
@@ -7976,7 +7201,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	guard(mutex)(&trace_types_lock);
 
-	if (tr->current_trace->use_max_tr)
+	if (tracer_uses_snapshot(tr->current_trace))
 		return -EBUSY;
 
 	local_irq_disable();
@@ -8002,7 +7227,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 			return -EINVAL;
 #endif
 		if (tr->allocated_snapshot)
-			ret = resize_buffer_duplicate_size(&tr->max_buffer,
+			ret = resize_buffer_duplicate_size(&tr->snapshot_buffer,
 					&tr->array_buffer, iter->cpu_file);
 
 		ret = tracing_arm_snapshot_locked(tr);
@@ -8023,9 +7248,9 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	default:
 		if (tr->allocated_snapshot) {
 			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
-				tracing_reset_online_cpus(&tr->max_buffer);
+				tracing_reset_online_cpus(&tr->snapshot_buffer);
 			else
-				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
+				tracing_reset_cpu(&tr->snapshot_buffer, iter->cpu_file);
 		}
 		break;
 	}
@@ -8075,13 +7300,13 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
 
 	info = filp->private_data;
 
-	if (info->iter.trace->use_max_tr) {
+	if (tracer_uses_snapshot(info->iter.trace)) {
 		tracing_buffers_release(inode, filp);
 		return -EBUSY;
 	}
 
 	info->iter.snapshot = true;
-	info->iter.array_buffer = &info->iter.tr->max_buffer;
+	info->iter.array_buffer = &info->iter.tr->snapshot_buffer;
 
 	return ret;
 }
@@ -8631,10 +7856,8 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (!count)
 		return 0;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+	if (iter->snapshot && tracer_uses_snapshot(iter->tr->current_trace))
 		return -EBUSY;
-#endif
 
 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
 
@@ -8818,10 +8041,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	int entries, i;
 	ssize_t ret = 0;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
+	if (iter->snapshot && tracer_uses_snapshot(iter->tr->current_trace))
 		return -EBUSY;
-#endif
 
 	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
 	if (*ppos & (page_size - 1))
@@ -8955,7 +8176,7 @@ static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned
 	return 0;
 }
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 static int get_snapshot_map(struct trace_array *tr)
 {
 	int err = 0;
@@ -9398,7 +8619,7 @@ tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
 	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
 				tr, cpu, &tracing_stats_fops);
 
-	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
+	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_WRITE, d_cpu,
 				tr, cpu, &tracing_entries_fops);
 
 	if (tr->range_addr_start)
@@ -9959,12 +9180,12 @@ buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
 	if (ret)
 		goto out;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	if (!tr->allocated_snapshot)
 		goto out_max;
 
-	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
+	ret = ring_buffer_subbuf_order_set(tr->snapshot_buffer.buffer, order);
 	if (ret) {
 		/* Put back the old order */
 		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
@@ -10180,12 +9401,12 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	/* Fix mapped buffer trace arrays do not have snapshot buffers */
 	if (tr->range_addr_start)
 		return 0;
 
-	ret = allocate_trace_buffer(tr, &tr->max_buffer,
+	ret = allocate_trace_buffer(tr, &tr->snapshot_buffer,
 				    allocate_snapshot ? size : 1);
 	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
 		free_trace_buffer(&tr->array_buffer);
@@ -10207,8 +9428,8 @@ static void free_trace_buffers(struct trace_array *tr)
 	free_trace_buffer(&tr->array_buffer);
 	kfree(tr->module_delta);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
-	free_trace_buffer(&tr->max_buffer);
+#ifdef CONFIG_TRACER_SNAPSHOT
+	free_trace_buffer(&tr->snapshot_buffer);
 #endif
 }
 
@@ -10349,7 +9570,7 @@ trace_array_create_systems(const char *name, const char *systems,
 	tr->syscall_buf_sz = global_trace.syscall_buf_sz;
 	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	spin_lock_init(&tr->snapshot_trigger_lock);
 #endif
 	tr->current_trace = &nop_trace;
@@ -10674,9 +9895,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 
 	create_trace_options_dir(tr);
 
-#ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_maxlat_file(tr, d_tracer);
-#endif
 
 	if (ftrace_create_function_files(tr, d_tracer))
 		MEM_FAIL(1, "Could not allocate function filter files");
@@ -10775,7 +9994,7 @@ int tracing_init_dentry(void)
 extern struct trace_eval_map *__start_ftrace_eval_maps[];
 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
 
-static struct workqueue_struct *eval_map_wq __initdata;
+struct workqueue_struct *trace_init_wq __initdata;
 static struct work_struct eval_map_work __initdata;
 static struct work_struct tracerfs_init_work __initdata;
 
@@ -10791,15 +10010,15 @@ static int __init trace_eval_init(void)
 {
 	INIT_WORK(&eval_map_work, eval_map_work_func);
 
-	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
-	if (!eval_map_wq) {
-		pr_err("Unable to allocate eval_map_wq\n");
+	trace_init_wq = alloc_workqueue("trace_init_wq", WQ_UNBOUND, 0);
+	if (!trace_init_wq) {
+		pr_err("Unable to allocate trace_init_wq\n");
 		/* Do work here */
 		eval_map_work_func(&eval_map_work);
 		return -ENOMEM;
 	}
 
-	queue_work(eval_map_wq, &eval_map_work);
+	queue_work(trace_init_wq, &eval_map_work);
 	return 0;
 }
 
@@ -10808,8 +10027,8 @@ subsys_initcall(trace_eval_init);
 static int __init trace_eval_sync(void)
 {
 	/* Make sure the eval map updates are finished */
-	if (eval_map_wq)
-		destroy_workqueue(eval_map_wq);
+	if (trace_init_wq)
+		destroy_workqueue(trace_init_wq);
 	return 0;
 }
 
@@ -10970,9 +10189,9 @@ static __init int tracer_init_tracefs(void)
 	if (ret)
 		return 0;
 
-	if (eval_map_wq) {
+	if (trace_init_wq) {
 		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
-		queue_work(eval_map_wq, &tracerfs_init_work);
+		queue_work(trace_init_wq, &tracerfs_init_work);
 	} else {
 		tracer_init_tracefs_work_func(NULL);
 	}
@@ -11305,7 +10524,7 @@ ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
 	return done;
 }
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 __init static bool tr_needs_alloc_snapshot(const char *name)
 {
 	char *test;
@@ -11495,7 +10714,7 @@ __init static void enable_instances(void)
 			}
 		} else {
 			/* Only non mapped buffers have snapshot buffers */
-			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
+			if (IS_ENABLED(CONFIG_TRACER_SNAPSHOT))
 				do_allocate_snapshot(name);
 		}
 
@@ -11622,7 +10841,7 @@ __init static int tracer_alloc_buffers(void)
 	global_trace.current_trace_flags = nop_trace.flags;
 	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	spin_lock_init(&global_trace.snapshot_trigger_lock);
 #endif
 	ftrace_init_global_array_ops(&global_trace);
@@ -11690,7 +10909,7 @@ struct trace_array *trace_get_global_array(void)
 
 void __init ftrace_boot_snapshot(void)
 {
-#ifdef CONFIG_TRACER_MAX_TRACE
+#ifdef CONFIG_TRACER_SNAPSHOT
 	struct trace_array *tr;
 
 	if (!snapshot_at_boot