author    Linus Torvalds <torvalds@linux-foundation.org>  2018-06-06 16:39:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-06-06 16:39:18 -0700
commit    5eb6eed7e0fe880dc8de8da203cc888716bbf196 (patch)
tree      05a7c83182b34a567c68b35c091cf31200d5ddec /kernel/trace/ring_buffer.c
parent    8b5c6a3a49d9ebc7dc288870b9c56c4f946035d8 (diff)
parent    591a033dc17ff6f684b6b6d1d7426e22d178194f (diff)
Merge tag 'trace-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "One new feature was added to ftrace, which is the trace_marker now
  supports triggers. For example:

    # cd /sys/kernel/debug/tracing
    # echo 'snapshot' > events/ftrace/print/trigger
    # echo 'cause snapshot' > trace_marker

  The rest of the changes are various clean ups and also one stable fix
  that was added late in the cycle"

* tag 'trace-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (21 commits)
  tracing: Use match_string() instead of open coding it in trace_set_options()
  branch-check: fix long->int truncation when profiling branches
  ring-buffer: Fix typo in comment
  ring-buffer: Fix a bunch of typos in comments
  tracing/selftest: Add test to test simple snapshot trigger for trace_marker
  tracing/selftest: Add test to test hist trigger between kernel event and trace_marker
  tracing/selftest: Add selftests to test trace_marker histogram triggers
  ftrace/selftest: Fix reset_trigger() to handle triggers with filters
  ftrace/selftest: Have the reset_trigger code be a bit more careful
  tracing: Document trace_marker triggers
  tracing: Allow histogram triggers to access ftrace internal events
  tracing: Prevent further users of zero size static arrays in trace events
  tracing: Have zero size length in filter logic be full string
  tracing: Add trigger file for trace_markers tracefs/ftrace/print
  tracing: Do not show filter file for ftrace internal events
  tracing: Add brackets in ftrace event dynamic arrays
  tracing: Have event_trace_init() called by trace_init_tracefs()
  tracing: Add __find_event_file() to find event files without restrictions
  tracing: Do not reference event data in post call triggers
  tracepoints: Fix the descriptions of tracepoint_probe_register{_prio}
  ...
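The trigger example above can also be driven programmatically. A minimal
userspace sketch, assuming the usual debugfs mount point and using only the
tracefs paths quoted in the pull message (write_file() is a made-up helper,
not part of any kernel or libc API):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a short string to a tracefs file, reporting any failure. */
    static int write_file(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror(path);
                    return -1;
            }
            if (write(fd, val, strlen(val)) < 0) {
                    perror(path);
                    close(fd);
                    return -1;
            }
            return close(fd);
    }

    int main(void)
    {
            /* Arm a snapshot trigger on the trace_marker event ... */
            if (write_file("/sys/kernel/debug/tracing/events/ftrace/print/trigger",
                           "snapshot"))
                    return 1;
            /* ... then fire it by writing to trace_marker. */
            return write_file("/sys/kernel/debug/tracing/trace_marker",
                              "cause snapshot") ? 1 : 0;
    }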
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c9cb9767d49b..6a46af21765c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -809,7 +809,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
*
* You can see, it is legitimate for the previous pointer of
* the head (or any page) not to point back to itself. But only
- * temporarially.
+ * temporarily.
*/
#define RB_PAGE_NORMAL 0UL
@@ -906,7 +906,7 @@ static void rb_list_head_clear(struct list_head *list)
}
/*
- * rb_head_page_dactivate - clears head page ptr (for free list)
+ * rb_head_page_deactivate - clears head page ptr (for free list)
*/
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
@@ -1780,7 +1780,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
put_online_cpus();
} else {
- /* Make sure this CPU has been intitialized */
+ /* Make sure this CPU has been initialized */
if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
goto out;
@@ -2325,7 +2325,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
/*
* If we need to add a timestamp, then we
- * add it to the start of the resevered space.
+ * add it to the start of the reserved space.
*/
if (unlikely(info->add_timestamp)) {
bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
@@ -2681,7 +2681,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
* ring_buffer_nest_start - Allow to trace while nested
* @buffer: The ring buffer to modify
*
- * The ring buffer has a safty mechanism to prevent recursion.
+ * The ring buffer has a safety mechanism to prevent recursion.
* But there may be a case where a trace needs to be done while
* tracing something else. In this case, calling this function
* will allow this function to nest within a currently active
@@ -2699,7 +2699,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
preempt_disable_notrace();
cpu = raw_smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
- /* This is the shift value for the above recusive locking */
+ /* This is the shift value for the above recursive locking */
cpu_buffer->nest += NESTED_BITS;
}
@@ -2718,7 +2718,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
/* disabled by ring_buffer_nest_start() */
cpu = raw_smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
- /* This is the shift value for the above recusive locking */
+ /* This is the shift value for the above recursive locking */
cpu_buffer->nest -= NESTED_BITS;
preempt_enable_notrace();
}
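For context between the hunks: a minimal sketch of how callers pair these two
functions, based only on the v4.18 signatures visible above. emit_inner_event()
is a hypothetical helper standing in for whatever nested write the caller needs:

    #include <linux/ring_buffer.h>

    /* Hypothetical inner write; in real code this would be another
     * tracing path that reserves and commits its own event. */
    static void emit_inner_event(struct ring_buffer *buffer);

    /* Sketch: allow one nested write inside an already-active ring
     * buffer write without tripping the recursion check. */
    static void nested_write(struct ring_buffer *buffer)
    {
            ring_buffer_nest_start(buffer);   /* raise the nest level */
            emit_inner_event(buffer);         /* would otherwise be rejected */
            ring_buffer_nest_end(buffer);     /* restore the nest level */
    }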
@@ -2907,7 +2907,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* @buffer: the ring buffer to reserve from
* @length: the length of the data to reserve (excluding event header)
*
- * Returns a reseverd event on the ring buffer to copy directly to.
+ * Returns a reserved event on the ring buffer to copy directly to.
* The user of this interface will need to get the body to write into
* and can use the ring_buffer_event_data() interface.
*
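For reference, a minimal sketch of the reserve/copy/commit sequence this
comment describes. write_blob() is a made-up name; the ring_buffer_* calls
match the v4.18 API:

    #include <linux/errno.h>
    #include <linux/ring_buffer.h>
    #include <linux/string.h>

    /* Sketch: reserve space on the ring buffer, copy a payload into
     * the event body, and commit it. */
    static int write_blob(struct ring_buffer *buffer,
                          const void *blob, unsigned long len)
    {
            struct ring_buffer_event *event;

            event = ring_buffer_lock_reserve(buffer, len);
            if (!event)
                    return -EBUSY;  /* buffer full or recursion blocked */
            memcpy(ring_buffer_event_data(event), blob, len);
            return ring_buffer_unlock_commit(buffer, event);
    }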
@@ -3009,7 +3009,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
* This function lets the user discard an event in the ring buffer
* and then that event will not be read later.
*
- * This function only works if it is called before the the item has been
+ * This function only works if it is called before the item has been
* committed. It will try to free the event from the ring buffer
* if another event has not been added behind it.
*
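A small sketch of the discard path described here, again against the v4.18
API. fill_payload() is hypothetical and stands for any step that may decide
the event is not worth keeping:

    #include <linux/ring_buffer.h>
    #include <linux/types.h>

    /* Hypothetical: fills the event body, returns false to drop it. */
    static bool fill_payload(void *body, unsigned long len);

    /* Sketch: discard a reserved-but-uncommitted event. */
    static void maybe_write(struct ring_buffer *buffer, unsigned long len)
    {
            struct ring_buffer_event *event;

            event = ring_buffer_lock_reserve(buffer, len);
            if (!event)
                    return;
            if (fill_payload(ring_buffer_event_data(event), len))
                    ring_buffer_unlock_commit(buffer, event);
            else
                    ring_buffer_discard_commit(buffer, event); /* before commit */
    }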
@@ -4127,7 +4127,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* through the buffer. Memory is allocated, buffer recording
* is disabled, and the iterator pointer is returned to the caller.
*
- * Disabling buffer recordng prevents the reading from being
+ * Disabling buffer recording prevents the reading from being
* corrupted. This is not a consuming read, so a producer is not
* expected.
*
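To round out the comment above, a sketch of the full non-consuming read
sequence as it existed around v4.18. dump_cpu() is a made-up wrapper; the
ring_buffer_read_* calls are the real API, though exact signatures should be
checked against the tree:

    #include <linux/ring_buffer.h>

    /* Sketch: iterate one CPU's buffer without consuming events. */
    static void dump_cpu(struct ring_buffer *buffer, int cpu)
    {
            struct ring_buffer_iter *iter;
            struct ring_buffer_event *event;
            u64 ts;

            iter = ring_buffer_read_prepare(buffer, cpu); /* disables recording */
            if (!iter)
                    return;
            ring_buffer_read_prepare_sync();
            ring_buffer_read_start(iter);
            while ((event = ring_buffer_read(iter, &ts)))
                    ;  /* inspect ring_buffer_event_data(event) here */
            ring_buffer_read_finish(iter); /* re-enables recording, frees iter */
    }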