From e0c48bf9e80ceefb83d74999adeeddbdd95f4c1d Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 23 Apr 2024 16:32:48 +0300 Subject: perf scripts python: Add a script to run instances of 'perf script' in parallel Add a Python script to run a perf script command multiple times in parallel, using perf script options --cpu and --time so that each job processes a different chunk of the data. Extend the perf script tests to also cover the new script. The script supports the use of normal 'perf script' options like --dlfilter and --script, so that the benefit of running parallel jobs naturally extends to them also. In addition, a command can be provided (refer --pipe-to option) to pipe standard output to a custom command. Refer to the script's own help text at the end of the patch for more details. The script is useful for Intel PT traces, which can be efficiently decoded by 'perf script' when split by CPU and/or time ranges. Running jobs in parallel can decrease the overall decoding time. Committer testing: Ian reported that shellcheck found some issues. I installed it; there is no warning when shellcheck is not available, but when available it fails the build with: TEST /tmp/build/perf-tools-next/tests/shell/script.sh.shellcheck_log CC /tmp/build/perf-tools-next/util/header.o In tests/shell/script.sh line 20: rm -rf "${temp_dir}/"* ^-------------^ SC2115 (warning): Use "${var:?}" to ensure this never expands to /* . In tests/shell/script.sh line 83: output1_dir="${temp_dir}/output1" ^---------^ SC2034 (warning): output1_dir appears unused. Verify use (or export if used externally). In tests/shell/script.sh line 84: output2_dir="${temp_dir}/output2" ^---------^ SC2034 (warning): output2_dir appears unused. Verify use (or export if used externally). In tests/shell/script.sh line 86: python3 "${pp}" -o "${output_dir}" --jobs 4 --verbose -- perf script -i "${perf_data}" ^-----------^ SC2154 (warning): output_dir is referenced but not assigned (did you mean 'output1_dir'?). For more information: https://www.shellcheck.net/wiki/SC2034 -- output1_dir appears unused. Verif... https://www.shellcheck.net/wiki/SC2115 -- Use "${var:?}" to ensure this nev... https://www.shellcheck.net/wiki/SC2154 -- output_dir is referenced but not ...
Did these fixes: - rm -rf "${temp_dir}/"* + rm -rf "${temp_dir:?}/"* And: @@ -83,8 +83,8 @@ test_parallel_perf() output1_dir="${temp_dir}/output1" output2_dir="${temp_dir}/output2" perf record -o "${perf_data}" --sample-cpu uname - python3 "${pp}" -o "${output_dir}" --jobs 4 --verbose -- perf script -i "${perf_data}" - python3 "${pp}" -o "${output_dir}" --jobs 4 --verbose --per-cpu -- perf script -i "${perf_data}" + python3 "${pp}" -o "${output1_dir}" --jobs 4 --verbose -- perf script -i "${perf_data}" + python3 "${pp}" -o "${output2_dir}" --jobs 4 --verbose --per-cpu -- perf script -i "${perf_data}" After that: root@number:~# perf test -vv "perf script tests" 97: perf script tests: --- start --- test child forked, pid 4084139 DB test [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.032 MB /tmp/perf-test-script.T4MJDr0L6J/perf.data (7 samples) ] DB test [Success] parallel-perf test Linux [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.034 MB /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data (7 samples) ] Starting: perf script --time=,91898.301878499 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --time=91898.301878500,91898.301905999 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --time=91898.301906000,91898.301933499 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --time=91898.301933500, -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --time=91898.301878500,91898.301905999 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --time=91898.301906000,91898.301933499 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 4 jobs: 2 completed, 2 running Finished: perf script --time=,91898.301878499 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --time=91898.301933500, -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 4 jobs: 4 completed, 0 running All jobs finished successfully parallel-perf.py done Starting: perf script --cpu=0 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=1 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=2 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=3 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=0 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=1 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=2 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=3 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 4 completed, 0 running Starting: perf script --cpu=4 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=5 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=6 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=7 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=4 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=5 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=6 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=7 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 8 completed, 0 running Starting: perf script --cpu=8 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=9 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data 
Starting: perf script --cpu=10 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=11 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=8 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=9 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=10 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=11 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 12 completed, 0 running Starting: perf script --cpu=12 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=13 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=14 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=15 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=12 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=13 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=14 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=15 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 16 completed, 0 running Starting: perf script --cpu=16 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=17 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=18 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=19 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=16 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=17 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=18 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=19 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 20 completed, 0 running Starting: perf script --cpu=20 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=21 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=22 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=23 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=20 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=21 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=22 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=23 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 24 completed, 0 running Starting: perf script --cpu=24 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=25 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=26 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Starting: perf script --cpu=27 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=25 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=26 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data Finished: perf script --cpu=27 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 27 completed, 1 running Finished: perf script --cpu=24 -i /tmp/perf-test-script.T4MJDr0L6J/pp-perf.data There are 28 jobs: 28 completed, 0 running All jobs finished successfully parallel-perf.py done parallel-perf test [Success] --- Cleaning up --- ---- end(0) ---- 97: perf script tests : Ok root@number:~# Reviewed-by: Andi Kleen Signed-off-by: Adrian Hunter Tested-by: Arnaldo Carvalho de Melo Cc: Ian 
Rogers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/r/20240423133248.10206-1-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/parallel-perf.py | 988 +++++++++++++++++++++++++++++ 1 file changed, 988 insertions(+) create mode 100755 tools/perf/scripts/python/parallel-perf.py (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/parallel-perf.py b/tools/perf/scripts/python/parallel-perf.py new file mode 100755 index 000000000000..21f32ec5ed46 --- /dev/null +++ b/tools/perf/scripts/python/parallel-perf.py @@ -0,0 +1,988 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# Run a perf script command multiple times in parallel, using perf script +# options --cpu and --time so that each job processes a different chunk +# of the data. +# +# Copyright (c) 2024, Intel Corporation. + +import subprocess +import argparse +import pathlib +import shlex +import time +import copy +import sys +import os +import re + +glb_prog_name = "parallel-perf.py" +glb_min_interval = 10.0 +glb_min_samples = 64 + +class Verbosity(): + + def __init__(self, quiet=False, verbose=False, debug=False): + self.normal = True + self.verbose = verbose + self.debug = debug + self.self_test = True + if self.debug: + self.verbose = True + if self.verbose: + quiet = False + if quiet: + self.normal = False + +# Manage work (Start/Wait/Kill), as represented by a subprocess.Popen command +class Work(): + + def __init__(self, cmd, pipe_to, output_dir="."): + self.popen = None + self.consumer = None + self.cmd = cmd + self.pipe_to = pipe_to + self.output_dir = output_dir + self.cmdout_name = f"{output_dir}/cmd.txt" + self.stdout_name = f"{output_dir}/out.txt" + self.stderr_name = f"{output_dir}/err.txt" + + def Command(self): + sh_cmd = [ shlex.quote(x) for x in self.cmd ] + return " ".join(self.cmd) + + def Stdout(self): + return open(self.stdout_name, "w") + + def Stderr(self): + return open(self.stderr_name, "w") + + def CreateOutputDir(self): + pathlib.Path(self.output_dir).mkdir(parents=True, exist_ok=True) + + def Start(self): + if self.popen: + return + self.CreateOutputDir() + with open(self.cmdout_name, "w") as f: + f.write(self.Command()) + f.write("\n") + stdout = self.Stdout() + stderr = self.Stderr() + if self.pipe_to: + self.popen = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=stderr) + args = shlex.split(self.pipe_to) + self.consumer = subprocess.Popen(args, stdin=self.popen.stdout, stdout=stdout, stderr=stderr) + else: + self.popen = subprocess.Popen(self.cmd, stdout=stdout, stderr=stderr) + + def RemoveEmptyErrFile(self): + if os.path.exists(self.stderr_name): + if os.path.getsize(self.stderr_name) == 0: + os.unlink(self.stderr_name) + + def Errors(self): + if os.path.exists(self.stderr_name): + if os.path.getsize(self.stderr_name) != 0: + return [ f"Non-empty error file {self.stderr_name}" ] + return [] + + def TidyUp(self): + self.RemoveEmptyErrFile() + + def RawPollWait(self, p, wait): + if wait: + return p.wait() + return p.poll() + + def Poll(self, wait=False): + if not self.popen: + return None + result = self.RawPollWait(self.popen, wait) + if self.consumer: + res = result + result = self.RawPollWait(self.consumer, wait) + if result != None and res == None: + self.popen.kill() + result = None + elif result == 0 and res != None and res != 0: + result = res + if result != None: + self.TidyUp() + return result + + def Wait(self): + return self.Poll(wait=True) + + def Kill(self): + if not self.popen: + 
return + self.popen.kill() + if self.consumer: + self.consumer.kill() + +def KillWork(worklist, verbosity): + for w in worklist: + w.Kill() + for w in worklist: + w.Wait() + +def NumberOfCPUs(): + return os.sysconf("SC_NPROCESSORS_ONLN") + +def NanoSecsToSecsStr(x): + if x == None: + return "" + x = str(x) + if len(x) < 10: + x = "0" * (10 - len(x)) + x + return x[:len(x) - 9] + "." + x[-9:] + +def InsertOptionAfter(cmd, option, after): + try: + pos = cmd.index(after) + cmd.insert(pos + 1, option) + except: + cmd.append(option) + +def CreateWorkList(cmd, pipe_to, output_dir, cpus, time_ranges_by_cpu): + max_len = len(str(cpus[-1])) + cpu_dir_fmt = f"cpu-%.{max_len}u" + worklist = [] + pos = 0 + for cpu in cpus: + if cpu >= 0: + cpu_dir = os.path.join(output_dir, cpu_dir_fmt % cpu) + cpu_option = f"--cpu={cpu}" + else: + cpu_dir = output_dir + cpu_option = None + + tr_dir_fmt = "time-range" + + if len(time_ranges_by_cpu) > 1: + time_ranges = time_ranges_by_cpu[pos] + tr_dir_fmt += f"-{pos}" + pos += 1 + else: + time_ranges = time_ranges_by_cpu[0] + + max_len = len(str(len(time_ranges))) + tr_dir_fmt += f"-%.{max_len}u" + + i = 0 + for r in time_ranges: + if r == [None, None]: + time_option = None + work_output_dir = cpu_dir + else: + time_option = "--time=" + NanoSecsToSecsStr(r[0]) + "," + NanoSecsToSecsStr(r[1]) + work_output_dir = os.path.join(cpu_dir, tr_dir_fmt % i) + i += 1 + work_cmd = list(cmd) + if time_option != None: + InsertOptionAfter(work_cmd, time_option, "script") + if cpu_option != None: + InsertOptionAfter(work_cmd, cpu_option, "script") + w = Work(work_cmd, pipe_to, work_output_dir) + worklist.append(w) + return worklist + +def DoRunWork(worklist, nr_jobs, verbosity): + nr_to_do = len(worklist) + not_started = list(worklist) + running = [] + done = [] + chg = False + while True: + nr_done = len(done) + if chg and verbosity.normal: + nr_run = len(running) + print(f"\rThere are {nr_to_do} jobs: {nr_done} completed, {nr_run} running", flush=True, end=" ") + if verbosity.verbose: + print() + chg = False + if nr_done == nr_to_do: + break + while len(running) < nr_jobs and len(not_started): + w = not_started.pop(0) + running.append(w) + if verbosity.verbose: + print("Starting:", w.Command()) + w.Start() + chg = True + if len(running): + time.sleep(0.1) + finished = [] + not_finished = [] + while len(running): + w = running.pop(0) + r = w.Poll() + if r == None: + not_finished.append(w) + continue + if r == 0: + if verbosity.verbose: + print("Finished:", w.Command()) + finished.append(w) + chg = True + continue + if verbosity.normal and not verbosity.verbose: + print() + print("Job failed!\n return code:", r, "\n command: ", w.Command()) + if w.pipe_to: + print(" piped to: ", w.pipe_to) + print("Killing outstanding jobs") + KillWork(not_finished, verbosity) + KillWork(running, verbosity) + return False + running = not_finished + done += finished + errorlist = [] + for w in worklist: + errorlist += w.Errors() + if len(errorlist): + print("Errors:") + for e in errorlist: + print(e) + elif verbosity.normal: + print("\r"," "*50, "\rAll jobs finished successfully", flush=True) + return True + +def RunWork(worklist, nr_jobs=NumberOfCPUs(), verbosity=Verbosity()): + try: + return DoRunWork(worklist, nr_jobs, verbosity) + except: + for w in worklist: + w.Kill() + raise + return True + +def ReadHeader(perf, file_name): + return subprocess.Popen([perf, "script", "--header-only", "--input", file_name], stdout=subprocess.PIPE).stdout.read().decode("utf-8") + +def ParseHeader(hdr): + result =
{} + lines = hdr.split("\n") + for line in lines: + if ":" in line and line[0] == "#": + pos = line.index(":") + name = line[1:pos-1].strip() + value = line[pos+1:].strip() + if name in result: + orig_name = name + nr = 2 + while True: + name = f"{orig_name} {nr}" + if name not in result: + break + nr += 1 + result[name] = value + return result + +def HeaderField(hdr_dict, hdr_fld): + if hdr_fld not in hdr_dict: + raise Exception(f"'{hdr_fld}' missing from header information") + return hdr_dict[hdr_fld] + +# Represent the position of an option within a command string +# and provide the option value and/or remove the option +class OptPos(): + + def Init(self, opt_element=-1, value_element=-1, opt_pos=-1, value_pos=-1, error=None): + self.opt_element = opt_element # list element that contains option + self.value_element = value_element # list element that contains option value + self.opt_pos = opt_pos # string position of option + self.value_pos = value_pos # string position of value + self.error = error # error message string + + def __init__(self, args, short_name, long_name, default=None): + self.args = list(args) + self.default = default + n = 2 + len(long_name) + m = len(short_name) + pos = -1 + for opt in args: + pos += 1 + if m and opt[:2] == f"-{short_name}": + if len(opt) == 2: + if pos + 1 < len(args): + self.Init(pos, pos + 1, 0, 0) + else: + self.Init(error = f"-{short_name} option missing value") + else: + self.Init(pos, pos, 0, 2) + return + if opt[:n] == f"--{long_name}": + if len(opt) == n: + if pos + 1 < len(args): + self.Init(pos, pos + 1, 0, 0) + else: + self.Init(error = f"--{long_name} option missing value") + elif opt[n] == "=": + self.Init(pos, pos, 0, n + 1) + else: + self.Init(error = f"--{long_name} option expected '='") + return + if m and opt[:1] == "-" and opt[:2] != "--" and short_name in opt: + ipos = opt.index(short_name) + if "-" in opt[1:]: + hpos = opt[1:].index("-") + if hpos < ipos: + continue + if ipos + 1 == len(opt): + if pos + 1 < len(args): + self.Init(pos, pos + 1, ipos, 0) + else: + self.Init(error = f"-{short_name} option missing value") + else: + self.Init(pos, pos, ipos, ipos + 1) + return + self.Init() + + def Value(self): + if self.opt_element >= 0: + if self.opt_element != self.value_element: + return self.args[self.value_element] + else: + return self.args[self.value_element][self.value_pos:] + return self.default + + def Remove(self, args): + if self.opt_element == -1: + return + if self.opt_element != self.value_element: + del args[self.value_element] + if self.opt_pos: + args[self.opt_element] = args[self.opt_element][:self.opt_pos] + else: + del args[self.opt_element] + +def DetermineInputFileName(cmd): + p = OptPos(cmd, "i", "input", "perf.data") + if p.error: + raise Exception(f"perf command {p.error}") + file_name = p.Value() + if not os.path.exists(file_name): + raise Exception(f"perf command input file '{file_name}' not found") + return file_name + +def ReadOption(args, short_name, long_name, err_prefix, remove=False): + p = OptPos(args, short_name, long_name) + if p.error: + raise Exception(f"{err_prefix}{p.error}") + value = p.Value() + if remove: + p.Remove(args) + return value + +def ExtractOption(args, short_name, long_name, err_prefix): + return ReadOption(args, short_name, long_name, err_prefix, True) + +def ReadPerfOption(args, short_name, long_name): + return ReadOption(args, short_name, long_name, "perf command ") + +def ExtractPerfOption(args, short_name, long_name): + return ExtractOption(args, short_name, long_name, 
"perf command ") + +def PerfDoubleQuickCommands(cmd, file_name): + cpu_str = ReadPerfOption(cmd, "C", "cpu") + time_str = ReadPerfOption(cmd, "", "time") + # Use double-quick sampling to determine trace data density + times_cmd = ["perf", "script", "--ns", "--input", file_name, "--itrace=qqi"] + if cpu_str != None and cpu_str != "": + times_cmd.append(f"--cpu={cpu_str}") + if time_str != None and time_str != "": + times_cmd.append(f"--time={time_str}") + cnts_cmd = list(times_cmd) + cnts_cmd.append("-Fcpu") + times_cmd.append("-Fcpu,time") + return cnts_cmd, times_cmd + +class CPUTimeRange(): + def __init__(self, cpu): + self.cpu = cpu + self.sample_cnt = 0 + self.time_ranges = None + self.interval = 0 + self.interval_remaining = 0 + self.remaining = 0 + self.tr_pos = 0 + +def CalcTimeRangesByCPU(line, cpu, cpu_time_ranges, max_time): + cpu_time_range = cpu_time_ranges[cpu] + cpu_time_range.remaining -= 1 + cpu_time_range.interval_remaining -= 1 + if cpu_time_range.remaining == 0: + cpu_time_range.time_ranges[cpu_time_range.tr_pos][1] = max_time + return + if cpu_time_range.interval_remaining == 0: + time = TimeVal(line[1][:-1], 0) + time_ranges = cpu_time_range.time_ranges + time_ranges[cpu_time_range.tr_pos][1] = time - 1 + time_ranges.append([time, max_time]) + cpu_time_range.tr_pos += 1 + cpu_time_range.interval_remaining = cpu_time_range.interval + +def CountSamplesByCPU(line, cpu, cpu_time_ranges): + try: + cpu_time_ranges[cpu].sample_cnt += 1 + except: + print("exception") + print("cpu", cpu) + print("len(cpu_time_ranges)", len(cpu_time_ranges)) + raise + +def ProcessCommandOutputLines(cmd, per_cpu, fn, *x): + # Assume CPU number is at beginning of line and enclosed by [] + pat = re.compile(r"\s*\[[0-9]+\]") + p = subprocess.Popen(cmd, stdout=subprocess.PIPE) + while True: + if line := p.stdout.readline(): + line = line.decode("utf-8") + if pat.match(line): + line = line.split() + if per_cpu: + # Assumes CPU number is enclosed by [] + cpu = int(line[0][1:-1]) + else: + cpu = 0 + fn(line, cpu, *x) + else: + break + p.wait() + +def IntersectTimeRanges(new_time_ranges, time_ranges): + pos = 0 + new_pos = 0 + # Can assume len(time_ranges) != 0 and len(new_time_ranges) != 0 + # Note also, there *must* be at least one intersection. 
+ while pos < len(time_ranges) and new_pos < len(new_time_ranges): + # new end < old start => no intersection, remove new + if new_time_ranges[new_pos][1] < time_ranges[pos][0]: + del new_time_ranges[new_pos] + continue + # new start > old end => no intersection, check next + if new_time_ranges[new_pos][0] > time_ranges[pos][1]: + pos += 1 + if pos < len(time_ranges): + continue + # no next, so remove remaining + while new_pos < len(new_time_ranges): + del new_time_ranges[new_pos] + return + # Found an intersection + # new start < old start => adjust new start = old start + if new_time_ranges[new_pos][0] < time_ranges[pos][0]: + new_time_ranges[new_pos][0] = time_ranges[pos][0] + # new end > old end => keep the overlap, insert the remainder + if new_time_ranges[new_pos][1] > time_ranges[pos][1]: + r = [ time_ranges[pos][1] + 1, new_time_ranges[new_pos][1] ] + new_time_ranges[new_pos][1] = time_ranges[pos][1] + new_pos += 1 + new_time_ranges.insert(new_pos, r) + continue + # new [start, end] is within old [start, end] + new_pos += 1 + +def SplitTimeRangesByTraceDataDensity(time_ranges, cpus, nr, cmd, file_name, per_cpu, min_size, min_interval, verbosity): + if verbosity.normal: + print("\rAnalyzing...", flush=True, end=" ") + if verbosity.verbose: + print() + cnts_cmd, times_cmd = PerfDoubleQuickCommands(cmd, file_name) + + nr_cpus = cpus[-1] + 1 if per_cpu else 1 + if per_cpu: + nr_cpus = cpus[-1] + 1 + cpu_time_ranges = [ CPUTimeRange(cpu) for cpu in range(nr_cpus) ] + else: + nr_cpus = 1 + cpu_time_ranges = [ CPUTimeRange(-1) ] + + if verbosity.debug: + print("nr_cpus", nr_cpus) + print("cnts_cmd", cnts_cmd) + print("times_cmd", times_cmd) + + # Count the number of "double quick" samples per CPU + ProcessCommandOutputLines(cnts_cmd, per_cpu, CountSamplesByCPU, cpu_time_ranges) + + tot = 0 + mx = 0 + for cpu_time_range in cpu_time_ranges: + cnt = cpu_time_range.sample_cnt + tot += cnt + if cnt > mx: + mx = cnt + if verbosity.debug: + print("cpu:", cpu_time_range.cpu, "sample_cnt", cnt) + + if min_size < 1: + min_size = 1 + + if mx < min_size: + # Too little data to be worth splitting + if verbosity.debug: + print("Too little data to split by time") + if nr == 0: + nr = 1 + return [ SplitTimeRangesIntoN(time_ranges, nr, min_interval) ] + + if nr: + divisor = nr + min_size = 1 + else: + divisor = NumberOfCPUs() + + interval = int(round(tot / divisor, 0)) + if interval < min_size: + interval = min_size + + if verbosity.debug: + print("divisor", divisor) + print("min_size", min_size) + print("interval", interval) + + min_time = time_ranges[0][0] + max_time = time_ranges[-1][1] + + for cpu_time_range in cpu_time_ranges: + cnt = cpu_time_range.sample_cnt + if cnt == 0: + cpu_time_range.time_ranges = copy.deepcopy(time_ranges) + continue + # Adjust target interval for CPU to give approximately equal interval sizes + # Determine number of intervals, rounding to nearest integer + n = int(round(cnt / interval, 0)) + if n < 1: + n = 1 + # Determine interval size, rounding up + d, m = divmod(cnt, n) + if m: + d += 1 + cpu_time_range.interval = d + cpu_time_range.interval_remaining = d + cpu_time_range.remaining = cnt + # Init. time ranges for each CPU with the start time + cpu_time_range.time_ranges = [ [min_time, max_time] ] + + # Set time ranges so that the same number of "double quick" samples + # will fall into each time range. 
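+ # For example, with interval=30, a CPU that saw cnt=100 double-quick + # samples gets n = round(100/30) = 3 ranges of ceil(100/3) = 34 samples each.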
+ ProcessCommandOutputLines(times_cmd, per_cpu, CalcTimeRangesByCPU, cpu_time_ranges, max_time) + + for cpu_time_range in cpu_time_ranges: + if cpu_time_range.sample_cnt: + IntersectTimeRanges(cpu_time_range.time_ranges, time_ranges) + + return [cpu_time_ranges[cpu].time_ranges for cpu in cpus] + +def SplitSingleTimeRangeIntoN(time_range, n): + if n <= 1: + return [time_range] + start = time_range[0] + end = time_range[1] + duration = int((end - start + 1) / n) + if duration < 1: + return [time_range] + time_ranges = [] + for i in range(n): + time_ranges.append([start, start + duration - 1]) + start += duration + time_ranges[-1][1] = end + return time_ranges + +def TimeRangeDuration(r): + return r[1] - r[0] + 1 + +def TotalDuration(time_ranges): + duration = 0 + for r in time_ranges: + duration += TimeRangeDuration(r) + return duration + +def SplitTimeRangesByInterval(time_ranges, interval): + new_ranges = [] + for r in time_ranges: + duration = TimeRangeDuration(r) + n = duration / interval + n = int(round(n, 0)) + new_ranges += SplitSingleTimeRangeIntoN(r, n) + return new_ranges + +def SplitTimeRangesIntoN(time_ranges, n, min_interval): + if n <= len(time_ranges): + return time_ranges + duration = TotalDuration(time_ranges) + interval = duration / n + if interval < min_interval: + interval = min_interval + return SplitTimeRangesByInterval(time_ranges, interval) + +def RecombineTimeRanges(tr): + new_tr = copy.deepcopy(tr) + n = len(new_tr) + i = 1 + while i < len(new_tr): + # if prev end + 1 == cur start, combine them + if new_tr[i - 1][1] + 1 == new_tr[i][0]: + new_tr[i][0] = new_tr[i - 1][0] + del new_tr[i - 1] + else: + i += 1 + return new_tr + +def OpenTimeRangeEnds(time_ranges, min_time, max_time): + if time_ranges[0][0] <= min_time: + time_ranges[0][0] = None + if time_ranges[-1][1] >= max_time: + time_ranges[-1][1] = None + +def BadTimeStr(time_str): + raise Exception(f"perf command bad time option: '{time_str}'\nCheck also 'time of first sample' and 'time of last sample' in perf script --header-only") + +def ValidateTimeRanges(time_ranges, time_str): + n = len(time_ranges) + for i in range(n): + start = time_ranges[i][0] + end = time_ranges[i][1] + if i != 0 and start <= time_ranges[i - 1][1]: + BadTimeStr(time_str) + if start > end: + BadTimeStr(time_str) + +def TimeVal(s, dflt): + s = s.strip() + if s == "": + return dflt + a = s.split(".") + if len(a) > 2: + raise Exception(f"Bad time value'{s}'") + x = int(a[0]) + if x < 0: + raise Exception("Negative time not allowed") + x *= 1000000000 + if len(a) > 1: + x += int((a[1] + "000000000")[:9]) + return x + +def BadCPUStr(cpu_str): + raise Exception(f"perf command bad cpu option: '{cpu_str}'\nCheck also 'nrcpus avail' in perf script --header-only") + +def ParseTimeStr(time_str, min_time, max_time): + if time_str == None or time_str == "": + return [[min_time, max_time]] + time_ranges = [] + for r in time_str.split(): + a = r.split(",") + if len(a) != 2: + BadTimeStr(time_str) + try: + start = TimeVal(a[0], min_time) + end = TimeVal(a[1], max_time) + except: + BadTimeStr(time_str) + time_ranges.append([start, end]) + ValidateTimeRanges(time_ranges, time_str) + return time_ranges + +def ParseCPUStr(cpu_str, nr_cpus): + if cpu_str == None or cpu_str == "": + return [-1] + cpus = [] + for r in cpu_str.split(","): + a = r.split("-") + if len(a) < 1 or len(a) > 2: + BadCPUStr(cpu_str) + try: + start = int(a[0].strip()) + if len(a) > 1: + end = int(a[1].strip()) + else: + end = start + except: + BadCPUStr(cpu_str) + if start < 0 or end < 
0 or end < start or end >= nr_cpus: + BadCPUStr(cpu_str) + cpus.extend(range(start, end + 1)) + cpus = list(set(cpus)) # Remove duplicates + cpus.sort() + return cpus + +class ParallelPerf(): + + def __init__(self, a): + for arg_name in vars(a): + setattr(self, arg_name, getattr(a, arg_name)) + self.orig_nr = self.nr + self.orig_cmd = list(self.cmd) + self.perf = self.cmd[0] + if os.path.exists(self.output_dir): + raise Exception(f"Output '{self.output_dir}' already exists") + if self.jobs < 0 or self.nr < 0 or self.interval < 0: + raise Exception("Bad options (negative values): try -h option for help") + if self.nr != 0 and self.interval != 0: + raise Exception("Cannot specify number of time subdivisions and time interval") + if self.jobs == 0: + self.jobs = NumberOfCPUs() + if self.nr == 0 and self.interval == 0: + if self.per_cpu: + self.nr = 1 + else: + self.nr = self.jobs + + def Init(self): + if self.verbosity.debug: + print("cmd", self.cmd) + self.file_name = DetermineInputFileName(self.cmd) + self.hdr = ReadHeader(self.perf, self.file_name) + self.hdr_dict = ParseHeader(self.hdr) + self.cmd_line = HeaderField(self.hdr_dict, "cmdline") + + def ExtractTimeInfo(self): + self.min_time = TimeVal(HeaderField(self.hdr_dict, "time of first sample"), 0) + self.max_time = TimeVal(HeaderField(self.hdr_dict, "time of last sample"), 0) + self.time_str = ExtractPerfOption(self.cmd, "", "time") + self.time_ranges = ParseTimeStr(self.time_str, self.min_time, self.max_time) + if self.verbosity.debug: + print("time_ranges", self.time_ranges) + + def ExtractCPUInfo(self): + if self.per_cpu: + nr_cpus = int(HeaderField(self.hdr_dict, "nrcpus avail")) + self.cpu_str = ExtractPerfOption(self.cmd, "C", "cpu") + if self.cpu_str == None or self.cpu_str == "": + self.cpus = [ x for x in range(nr_cpus) ] + else: + self.cpus = ParseCPUStr(self.cpu_str, nr_cpus) + else: + self.cpu_str = None + self.cpus = [-1] + if self.verbosity.debug: + print("cpus", self.cpus) + + def IsIntelPT(self): + return self.cmd_line.find("intel_pt") >= 0 + + def SplitTimeRanges(self): + if self.IsIntelPT() and self.interval == 0: + self.split_time_ranges_for_each_cpu = \ + SplitTimeRangesByTraceDataDensity(self.time_ranges, self.cpus, self.orig_nr, + self.orig_cmd, self.file_name, self.per_cpu, + self.min_size, self.min_interval, self.verbosity) + elif self.nr: + self.split_time_ranges_for_each_cpu = [ SplitTimeRangesIntoN(self.time_ranges, self.nr, self.min_interval) ] + else: + self.split_time_ranges_for_each_cpu = [ SplitTimeRangesByInterval(self.time_ranges, self.interval) ] + + def CheckTimeRanges(self): + for tr in self.split_time_ranges_for_each_cpu: + # Re-combined time ranges should be the same + new_tr = RecombineTimeRanges(tr) + if new_tr != self.time_ranges: + if self.verbosity.debug: + print("tr", tr) + print("new_tr", new_tr) + raise Exception("Self test failed!") + + def OpenTimeRangeEnds(self): + for time_ranges in self.split_time_ranges_for_each_cpu: + OpenTimeRangeEnds(time_ranges, self.min_time, self.max_time) + + def CreateWorkList(self): + self.worklist = CreateWorkList(self.cmd, self.pipe_to, self.output_dir, self.cpus, self.split_time_ranges_for_each_cpu) + + def PerfDataRecordedPerCPU(self): + if "--per-thread" in self.cmd_line.split(): + return False + return True + + def DefaultToPerCPU(self): + # --no-per-cpu option takes precedence + if self.no_per_cpu: + return False + if not self.PerfDataRecordedPerCPU(): + return False + # Default to per-cpu for Intel PT data that was recorded per-cpu, + # because 
decoding can be done for each CPU separately. + if self.IsIntelPT(): + return True + return False + + def Config(self): + self.Init() + self.ExtractTimeInfo() + if not self.per_cpu: + self.per_cpu = self.DefaultToPerCPU() + if self.verbosity.debug: + print("per_cpu", self.per_cpu) + self.ExtractCPUInfo() + self.SplitTimeRanges() + if self.verbosity.self_test: + self.CheckTimeRanges() + # Prefer open-ended time range to starting / ending with min_time / max_time resp. + self.OpenTimeRangeEnds() + self.CreateWorkList() + + def Run(self): + if self.dry_run: + print(len(self.worklist),"jobs:") + for w in self.worklist: + print(w.Command()) + return True + result = RunWork(self.worklist, self.jobs, verbosity=self.verbosity) + if self.verbosity.verbose: + print(glb_prog_name, "done") + return result + +def RunParallelPerf(a): + pp = ParallelPerf(a) + pp.Config() + return pp.Run() + +def Main(args): + ap = argparse.ArgumentParser( + prog=glb_prog_name, formatter_class = argparse.RawDescriptionHelpFormatter, + description = +""" +Run a perf script command multiple times in parallel, using perf script options +--cpu and --time so that each job processes a different chunk of the data. +""", + epilog = +""" +Follow the options by '--' and then the perf script command e.g. + + $ perf record -a -- sleep 10 + $ parallel-perf.py --nr=4 -- perf script --ns + All jobs finished successfully + $ tree parallel-perf-output/ + parallel-perf-output/ + ├── time-range-0 + │   ├── cmd.txt + │   └── out.txt + ├── time-range-1 + │   ├── cmd.txt + │   └── out.txt + ├── time-range-2 + │   ├── cmd.txt + │   └── out.txt + └── time-range-3 + ├── cmd.txt + └── out.txt + $ find parallel-perf-output -name cmd.txt | sort | xargs grep -H . + parallel-perf-output/time-range-0/cmd.txt:perf script --time=,9466.504461499 --ns + parallel-perf-output/time-range-1/cmd.txt:perf script --time=9466.504461500,9469.005396999 --ns + parallel-perf-output/time-range-2/cmd.txt:perf script --time=9469.005397000,9471.506332499 --ns + parallel-perf-output/time-range-3/cmd.txt:perf script --time=9471.506332500, --ns + +Any perf script command can be used, including the use of perf script options +--dlfilter and --script, so that the benefit of running parallel jobs +naturally extends to them also. + +If option --pipe-to is used, standard output is first piped through that +command. Beware, if the command fails (e.g. grep with no matches), it will be +considered a fatal error. + +Final standard output is redirected to files named out.txt in separate +subdirectories under the output directory. Similarly, standard error is +written to files named err.txt. In addition, files named cmd.txt contain the +corresponding perf script command. After processing, err.txt files are removed +if they are empty. + +If any job exits with a non-zero exit code, then all jobs are killed and no +more are started. A message is printed if any job results in a non-empty +err.txt file. + +There is a separate output subdirectory for each time range. If the --per-cpu +option is used, these are further grouped under cpu-n subdirectories, e.g. 
+ + $ parallel-perf.py --per-cpu --nr=2 -- perf script --ns --cpu=0,1 + All jobs finished successfully + $ tree parallel-perf-output + parallel-perf-output/ + ├── cpu-0 + │   ├── time-range-0 + │   │   ├── cmd.txt + │   │   └── out.txt + │   └── time-range-1 + │   ├── cmd.txt + │   └── out.txt + └── cpu-1 + ├── time-range-0 + │   ├── cmd.txt + │   └── out.txt + └── time-range-1 + ├── cmd.txt + └── out.txt + $ find parallel-perf-output -name cmd.txt | sort | xargs grep -H . + parallel-perf-output/cpu-0/time-range-0/cmd.txt:perf script --cpu=0 --time=,9469.005396999 --ns + parallel-perf-output/cpu-0/time-range-1/cmd.txt:perf script --cpu=0 --time=9469.005397000, --ns + parallel-perf-output/cpu-1/time-range-0/cmd.txt:perf script --cpu=1 --time=,9469.005396999 --ns + parallel-perf-output/cpu-1/time-range-1/cmd.txt:perf script --cpu=1 --time=9469.005397000, --ns + +Subdivisions of time range, and cpus if the --per-cpu option is used, are +expressed by the --time and --cpu perf script options respectively. If the +supplied perf script command has a --time option, then that time range is +subdivided, otherwise the time range given by 'time of first sample' to +'time of last sample' is used (refer perf script --header-only). Similarly, the +supplied perf script command may provide a --cpu option, and only those CPUs +will be processed. + +To prevent time intervals becoming too small, the --min-interval option can +be used. + +Note there is special handling for processing Intel PT traces. If an interval is +not specified and the perf record command contained the intel_pt event, then the +time range will be subdivided in order to produce subdivisions that contain +approximately the same amount of trace data. That is accomplished by counting +double-quick (--itrace=qqi) samples, and choosing time ranges that encompass +approximately the same number of samples. In that case, time ranges may not be +the same for each CPU processed. For Intel PT, --per-cpu is the default, but +that can be overridden by --no-per-cpu. Note, for Intel PT, double-quick +decoding produces 1 sample for each PSB synchronization packet, which in turn +come after a certain number of bytes output, determined by psb_period (refer +perf Intel PT documentation). The minimum number of double-quick samples that +will define a time range can be set by the --min_size option, which defaults to +64. +""") + ap.add_argument("-o", "--output-dir", default="parallel-perf-output", help="output directory (default 'parallel-perf-output')") + ap.add_argument("-j", "--jobs", type=int, default=0, help="maximum number of jobs to run in parallel at one time (default is the number of CPUs)") + ap.add_argument("-n", "--nr", type=int, default=0, help="number of time subdivisions (default is the number of jobs)") + ap.add_argument("-i", "--interval", type=float, default=0, help="subdivide the time range using this time interval (in seconds e.g. 
0.1 for a tenth of a second)") + ap.add_argument("-c", "--per-cpu", action="store_true", help="process data for each CPU in parallel") + ap.add_argument("-m", "--min-interval", type=float, default=glb_min_interval, help=f"minimum interval (default {glb_min_interval} seconds)") + ap.add_argument("-p", "--pipe-to", help="command to pipe output to (optional)") + ap.add_argument("-N", "--no-per-cpu", action="store_true", help="do not process data for each CPU in parallel") + ap.add_argument("-b", "--min_size", type=int, default=glb_min_samples, help="minimum data size (for Intel PT in PSBs)") + ap.add_argument("-D", "--dry-run", action="store_true", help="do not run any jobs, just show the perf script commands") + ap.add_argument("-q", "--quiet", action="store_true", help="do not print any messages except errors") + ap.add_argument("-v", "--verbose", action="store_true", help="print more messages") + ap.add_argument("-d", "--debug", action="store_true", help="print debugging messages") + cmd_line = list(args) + try: + split_pos = cmd_line.index("--") + cmd = cmd_line[split_pos + 1:] + args = cmd_line[:split_pos] + except: + cmd = None + args = cmd_line + a = ap.parse_args(args=args[1:]) + a.cmd = cmd + a.verbosity = Verbosity(a.quiet, a.verbose, a.debug) + try: + if a.cmd == None: + if len(args) <= 1: + ap.print_help() + return True + raise Exception("Command line must contain '--' before perf command") + return RunParallelPerf(a) + except Exception as e: + print("Fatal error: ", str(e)) + if a.debug: + raise + return False + +if __name__ == "__main__": + if not Main(sys.argv): + sys.exit(1) -- cgit v1.2.3 From a9700511fd50b9203a9a9d61b4874eb28571d5da Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Wed, 5 Jun 2024 16:44:42 +0200 Subject: perf script: netdev-times: add location parameter to consume_skb dd1b527831a3 ("net: add location to trace_consume_skb()") added a new parameter to the consume_skb tracepoint. Adapt the script to match. 
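The actual fix follows in the diff below. As background, perf script invokes a tracepoint handler with the common fields first and the tracepoint's own fields after, all as positional arguments, so a field added to a tracepoint changes the arity that every handler must accept. A defensive alternative would be a variadic tail; this is only an editorial sketch of that option, not what the patch does:

    def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                         skbaddr, *extra_fields):
        # '*extra_fields' absorbs tracepoint fields added later (such as
        # 'location'), at the cost of silently ignoring them.
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr)
        all_event_list.append(event_info)

The explicit signature used by the patch has the advantage of failing loudly if the tracepoint format changes again.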
Signed-off-by: Lucas Stach Acked-by: Arnaldo Carvalho de Melo Cc: kernel@pengutronix.de Cc: patchwork-lst@pengutronix.de Signed-off-by: Namhyung Kim Link: https://lore.kernel.org/r/20240605144442.1985270-1-l.stach@pengutronix.de --- tools/perf/scripts/python/netdev-times.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py index 00552eeb7178..30c4bccee5b2 100644 --- a/tools/perf/scripts/python/netdev-times.py +++ b/tools/perf/scripts/python/netdev-times.py @@ -293,7 +293,8 @@ def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, location, protocol, reason) all_event_list.append(event_info) -def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr): +def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, + skbaddr, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) -- cgit v1.2.3 From 7d49ced808b169c8cb754da15a69ed0b2fb26567 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Sun, 23 Jun 2024 12:18:50 +0530 Subject: tools/perf: Fix parallel-perf python script to replace new python syntax ":=" usage perf test "perf script tests" fails as below on systems with Python 3.6: File "/home/athira/linux/tools/perf/tests/shell/../../scripts/python/parallel-perf.py", line 442 if line := p.stdout.readline(): ^ SyntaxError: invalid syntax --- Cleaning up --- ---- end(-1) ---- 92: perf script tests: FAILED! This happens because ":=" (the walrus operator) is new syntax that assigns values to variables as part of a larger expression. It was introduced in Python 3.8, so it fails on systems with Python 3.6. Address this by splitting the large expression and checking the value in two steps: Previous line: if line := p.stdout.readline(): Current change: line = p.stdout.readline() if line: With the patch: ./perf test "perf script tests" 93: perf script tests: Ok Signed-off-by: Athira Rajeev Acked-by: Adrian Hunter Cc: akanksha@linux.ibm.com Cc: kjain@linux.ibm.com Cc: maddy@linux.ibm.com Cc: disgoel@linux.vnet.ibm.com Cc: linuxppc-dev@lists.ozlabs.org Signed-off-by: Namhyung Kim Link: https://lore.kernel.org/r/20240623064850.83720-3-atrajeev@linux.vnet.ibm.com --- tools/perf/scripts/python/parallel-perf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/parallel-perf.py b/tools/perf/scripts/python/parallel-perf.py index 21f32ec5ed46..be85fd7f6632 100755 --- a/tools/perf/scripts/python/parallel-perf.py +++ b/tools/perf/scripts/python/parallel-perf.py @@ -439,7 +439,8 @@ def ProcessCommandOutputLines(cmd, per_cpu, fn, *x): pat = re.compile(r"\s*\[[0-9]+\]") p = subprocess.Popen(cmd, stdout=subprocess.PIPE) while True: - if line := p.stdout.readline(): + line = p.stdout.readline() + if line: line = line.decode("utf-8") if pat.match(line): line = line.split() -- cgit v1.2.3 From e467705a9fb37f51595aa6deaca085ccb4005454 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 25 Jun 2024 14:41:15 -0700 Subject: perf util: Make util its own library MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the util directory into its own library. This is done to avoid compiling code twice, once for the perf tool and once for the perf python module.
For convenience: arch/common.c scripts/perl/Perf-Trace-Util/Context.c scripts/python/Perf-Trace-Util/Context.c are made part of this library. Signed-off-by: Ian Rogers Reviewed-by: James Clark Cc: Suzuki K Poulose Cc: Kees Cook Cc: Palmer Dabbelt Cc: Albert Ou Cc: Nick Terrell Cc: Gary Guo Cc: Alex Gaynor Cc: Boqun Feng Cc: Wedson Almeida Filho Cc: Ze Gao Cc: Alice Ryhl Cc: Andrei Vagin Cc: Yicong Yang Cc: Jonathan Cameron Cc: Guo Ren Cc: Miguel Ojeda Cc: Will Deacon Cc: Mike Leach Cc: Leo Yan Cc: Oliver Upton Cc: John Garry Cc: Benno Lossin Cc: Björn Roy Baron Cc: Andreas Hindborg Cc: Paul Walmsley Signed-off-by: Namhyung Kim Link: https://lore.kernel.org/r/20240625214117.953777-7-irogers@google.com --- tools/perf/scripts/python/Perf-Trace-Util/Build | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build index 5b0b5ff7e14a..be3710c61320 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/Build +++ b/tools/perf/scripts/python/Perf-Trace-Util/Build @@ -1,4 +1,4 @@ -perf-y += Context.o +perf-util-y += Context.o # -Wno-declaration-after-statement: The python headers have mixed code with declarations (decls after asserts, for instance) CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-declaration-after-statement -- cgit v1.2.3 From ae8e4f4048b839c1cb333d9e3d20e634b430139e Mon Sep 17 00:00:00 2001 From: James Clark Date: Tue, 23 Jul 2024 14:28:58 +0100 Subject: perf scripts python cs-etm: Restore first sample log in verbose mode The linked commit moved the early return on the first sample to before the verbose log, so move the log earlier too. Now the first sample is also logged and not skipped. Fixes: 2d98dbb4c9c5b09c ("perf scripts python arm-cs-trace-disasm.py: Do not ignore disam first sample") Reviewed-by: Leo Yan Signed-off-by: James Clark Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Benjamin Gray Cc: coresight@lists.linaro.org Cc: gankulkarni@os.amperecomputing.com Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ruidong Tian Cc: Suzuki Poulouse Link: https://lore.kernel.org/r/20240723132858.12747-1-james.clark@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/arm-cs-trace-disasm.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index d973c2baed1c..7aff02d84ffb 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -192,17 +192,16 @@ def process_event(param_dict): ip = sample["ip"] addr = sample["addr"] + if (options.verbose == True): + print("Event type: %s" % name) + print_sample(sample) + # Initialize CPU data if it's empty, and directly return back # if this is the first tracing event for this CPU. 
if (cpu_data.get(str(cpu) + 'addr') == None): cpu_data[str(cpu) + 'addr'] = addr return - - if (options.verbose == True): - print("Event type: %s" % name) - print_sample(sample) - # If cannot find dso so cannot dump assembler, bail out if (dso == '[unknown]'): return -- cgit v1.2.3 From 9943581c64b1d1edaf9985ee81e45e728f67cd2e Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 16 Sep 2024 14:57:34 +0100 Subject: perf scripting python: Add function to get a config value This can be used to get config values like which objdump Perf uses for disassembly. Reviewed-by: Leo Yan Signed-off-by: James Clark Tested-by: Ganapatrao Kulkarni Cc: Ben Gainey Cc: Suzuki K Poulose Cc: Will Deacon Cc: Mathieu Poirier Cc: Mike Leach Cc: Ruidong Tian Cc: Leo Yan Cc: Benjamin Gray Cc: linux-arm-kernel@lists.infradead.org Cc: coresight@lists.linaro.org Cc: John Garry Cc: scclevenger@os.amperecomputing.com Link: https://lore.kernel.org/r/20240916135743.1490403-4-james.clark@linaro.org Signed-off-by: Namhyung Kim --- tools/perf/scripts/python/Perf-Trace-Util/Context.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c index 3954bd1587ce..01f54d6724a5 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c +++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c @@ -12,6 +12,7 @@ #define PY_SSIZE_T_CLEAN #include +#include "../../../util/config.h" #include "../../../util/trace-event.h" #include "../../../util/event.h" #include "../../../util/symbol.h" @@ -182,6 +183,15 @@ static PyObject *perf_sample_srccode(PyObject *obj, PyObject *args) return perf_sample_src(obj, args, true); } +static PyObject *__perf_config_get(PyObject *obj, PyObject *args) +{ + const char *config_name; + + if (!PyArg_ParseTuple(args, "s", &config_name)) + return NULL; + return Py_BuildValue("s", perf_config_get(config_name)); +} + static PyMethodDef ContextMethods[] = { #ifdef HAVE_LIBTRACEEVENT { "common_pc", perf_trace_context_common_pc, METH_VARARGS, @@ -199,6 +209,7 @@ static PyMethodDef ContextMethods[] = { METH_VARARGS, "Get source file name and line number."}, { "perf_sample_srccode", perf_sample_srccode, METH_VARARGS, "Get source file name, line number and line."}, + { "perf_config_get", __perf_config_get, METH_VARARGS, "Get perf config entry"}, { NULL, NULL, 0, NULL} }; -- cgit v1.2.3 From 7b371afc9b67349c724c15d235924bc40694872a Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 16 Sep 2024 14:57:35 +0100 Subject: perf scripts python cs-etm: Update to use argparse optparse is deprecated and less flexible than argparse so update it. 
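For reference, the shape of the conversion (an illustrative snippet, not the patch itself, which follows below): optparse collects make_option() entries and returns an (options, args) pair, whereas argparse registers arguments directly and returns a single namespace, rejecting unrecognized arguments by default.

    # optparse, deprecated since Python 3.2:
    #     parser = OptionParser(option_list=[make_option("-v", "--verbose",
    #                           action="store_true", default=False)])
    #     (options, args) = parser.parse_args()
    # argparse equivalent; "store_true" already implies default=False:
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debugging log")
    options = parser.parse_args()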
Reviewed-by: Leo Yan Signed-off-by: James Clark Tested-by: Ganapatrao Kulkarni Cc: Ben Gainey Cc: Suzuki K Poulose Cc: Will Deacon Cc: Mathieu Poirier Cc: Mike Leach Cc: Ruidong Tian Cc: Leo Yan Cc: Benjamin Gray Cc: linux-arm-kernel@lists.infradead.org Cc: coresight@lists.linaro.org Cc: John Garry Cc: scclevenger@os.amperecomputing.com Link: https://lore.kernel.org/r/20240916135743.1490403-5-james.clark@linaro.org Signed-off-by: Namhyung Kim --- tools/perf/scripts/python/arm-cs-trace-disasm.py | 28 +++++++++--------------- 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index 7aff02d84ffb..45f682a8b34d 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -11,7 +11,7 @@ import os from os import path import re from subprocess import * -from optparse import OptionParser, make_option +import argparse from perf_trace_context import perf_set_itrace_options, \ perf_sample_insn, perf_sample_srccode @@ -28,19 +28,11 @@ from perf_trace_context import perf_set_itrace_options, \ # perf script -s scripts/python/arm-cs-trace-disasm.py # Command line parsing. -option_list = [ - # formatting options for the bottom entry of the stack - make_option("-k", "--vmlinux", dest="vmlinux_name", - help="Set path to vmlinux file"), - make_option("-d", "--objdump", dest="objdump_name", - help="Set path to objdump executable file"), - make_option("-v", "--verbose", dest="verbose", - action="store_true", default=False, - help="Enable debugging log") -] - -parser = OptionParser(option_list=option_list) -(options, args) = parser.parse_args() +args = argparse.ArgumentParser() +args.add_argument("-k", "--vmlinux", help="Set path to vmlinux file") +args.add_argument("-d", "--objdump", help="Set path to objdump executable file"), +args.add_argument("-v", "--verbose", action="store_true", help="Enable debugging log") +options = args.parse_args() # Initialize global dicts and regular expression disasm_cache = dict() @@ -65,8 +57,8 @@ def get_offset(perf_dict, field): def get_dso_file_path(dso_name, dso_build_id): if (dso_name == "[kernel.kallsyms]" or dso_name == "vmlinux"): - if (options.vmlinux_name): - return options.vmlinux_name; + if (options.vmlinux): + return options.vmlinux; else: return dso_name @@ -92,7 +84,7 @@ def read_disam(dso_fname, dso_start, start_addr, stop_addr): else: start_addr = start_addr - dso_start; stop_addr = stop_addr - dso_start; - disasm = [ options.objdump_name, "-d", "-z", + disasm = [ options.objdump, "-d", "-z", "--start-address="+format(start_addr,"#x"), "--stop-address="+format(stop_addr,"#x") ] disasm += [ dso_fname ] @@ -256,7 +248,7 @@ def process_event(param_dict): print("Stop address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (stop_addr, int(dso_start), int(dso_end), dso)) return - if (options.objdump_name != None): + if (options.objdump != None): # It doesn't need to decrease virtual memory offset for disassembly # for kernel dso and executable file dso, so in this case we set # vm_start to zero. -- cgit v1.2.3 From 8286cc55a9a6f03d62bd140ce827025f9ed5e619 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 16 Sep 2024 14:57:36 +0100 Subject: perf scripts python cs-etm: Improve arguments Make vmlinux detection automatic and use Perf's default objdump when -d is specified. 
This will make it easier for a test to use the script without having to provide arguments. And similarly for users. Reviewed-by: Leo Yan Signed-off-by: James Clark Tested-by: Ganapatrao Kulkarni Cc: Ben Gainey Cc: Suzuki K Poulose Cc: Will Deacon Cc: Mathieu Poirier Cc: Mike Leach Cc: Ruidong Tian Cc: Leo Yan Cc: Benjamin Gray Cc: linux-arm-kernel@lists.infradead.org Cc: coresight@lists.linaro.org Cc: John Garry Cc: scclevenger@os.amperecomputing.com Link: https://lore.kernel.org/r/20240916135743.1490403-6-james.clark@linaro.org Signed-off-by: Namhyung Kim --- tools/perf/scripts/python/arm-cs-trace-disasm.py | 63 +++++++++++++++++++++--- 1 file changed, 55 insertions(+), 8 deletions(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index 45f682a8b34d..02e957d037ea 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -12,25 +12,48 @@ from os import path import re from subprocess import * import argparse +import platform -from perf_trace_context import perf_set_itrace_options, \ - perf_sample_insn, perf_sample_srccode +from perf_trace_context import perf_sample_srccode, perf_config_get # Below are some example commands for using this script. +# Note a --kcore recording is required for accurate decode +# due to the alternatives patching mechanism. However this +# script only supports reading vmlinux for disassembly dump, +# meaning that any patched instructions will appear +# as unpatched, but the instruction ranges themselves will +# be correct. In addition to this, source line info comes +# from Perf, and when using kcore there is no debug info. The +# following lists the supported features in each mode: +# +# +-----------+-----------------+------------------+------------------+ +# | Recording | Accurate decode | Source line dump | Disassembly dump | +# +-----------+-----------------+------------------+------------------+ +# | --kcore | yes | no | yes | +# | normal | no | yes | yes | +# +-----------+-----------------+------------------+------------------+ +# +# Output disassembly with objdump and auto detect vmlinux +# (when running on same machine.) +# perf script -s scripts/python/arm-cs-trace-disasm.py -d # -# Output disassembly with objdump: -# perf script -s scripts/python/arm-cs-trace-disasm.py \ -# -- -d objdump -k path/to/vmlinux # Output disassembly with llvm-objdump: # perf script -s scripts/python/arm-cs-trace-disasm.py \ # -- -d llvm-objdump-11 -k path/to/vmlinux +# # Output only source line and symbols: # perf script -s scripts/python/arm-cs-trace-disasm.py +def default_objdump(): + config = perf_config_get("annotate.objdump") + return config if config else "objdump" + # Command line parsing. args = argparse.ArgumentParser() -args.add_argument("-k", "--vmlinux", help="Set path to vmlinux file") -args.add_argument("-d", "--objdump", help="Set path to objdump executable file"), +args.add_argument("-k", "--vmlinux", + help="Set path to vmlinux file. Omit to autodetect if running on same machine") +args.add_argument("-d", "--objdump", nargs="?", const=default_objdump(), + help="Show disassembly. 
Can also be used to change the objdump path"), args.add_argument("-v", "--verbose", action="store_true", help="Enable debugging log") options = args.parse_args() @@ -45,6 +68,17 @@ glb_source_file_name = None glb_line_number = None glb_dso = None +kver = platform.release() +vmlinux_paths = [ + f"/usr/lib/debug/boot/vmlinux-{kver}.debug", + f"/usr/lib/debug/lib/modules/{kver}/vmlinux", + f"/lib/modules/{kver}/build/vmlinux", + f"/usr/lib/debug/boot/vmlinux-{kver}", + f"/boot/vmlinux-{kver}", + f"/boot/vmlinux", + f"vmlinux" +] + def get_optional(perf_dict, field): if field in perf_dict: return perf_dict[field] @@ -55,12 +89,25 @@ def get_offset(perf_dict, field): return "+%#x" % perf_dict[field] return "" +def find_vmlinux(): + if hasattr(find_vmlinux, "path"): + return find_vmlinux.path + + for v in vmlinux_paths: + if os.access(v, os.R_OK): + find_vmlinux.path = v + break + else: + find_vmlinux.path = None + + return find_vmlinux.path + def get_dso_file_path(dso_name, dso_build_id): if (dso_name == "[kernel.kallsyms]" or dso_name == "vmlinux"): if (options.vmlinux): return options.vmlinux; else: - return dso_name + return find_vmlinux() if find_vmlinux() else dso_name if (dso_name == "[vdso]") : append = "/vdso" -- cgit v1.2.3 From 66dd3b539efe0d4b44324c1fe39978db8111ed93 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 16 Sep 2024 14:57:37 +0100 Subject: perf scripts python cs-etm: Add start and stop arguments Make it possible to only disassemble a range of timestamps or sample indexes. This will be used by the test to limit the runtime, but it's also useful for users. Reviewed-by: Leo Yan Signed-off-by: James Clark Tested-by: Ganapatrao Kulkarni Cc: Ben Gainey Cc: Suzuki K Poulose Cc: Will Deacon Cc: Mathieu Poirier Cc: Mike Leach Cc: Ruidong Tian Cc: Leo Yan Cc: Benjamin Gray Cc: linux-arm-kernel@lists.infradead.org Cc: coresight@lists.linaro.org Cc: John Garry Cc: scclevenger@os.amperecomputing.com Link: https://lore.kernel.org/r/20240916135743.1490403-7-james.clark@linaro.org Signed-off-by: Namhyung Kim --- tools/perf/scripts/python/arm-cs-trace-disasm.py | 40 ++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index 02e957d037ea..1128d259b4f4 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -49,13 +49,36 @@ def default_objdump(): return config if config else "objdump" # Command line parsing. +def int_arg(v): + v = int(v) + if v < 0: + raise argparse.ArgumentTypeError("Argument must be a positive integer") + return v + args = argparse.ArgumentParser() args.add_argument("-k", "--vmlinux", help="Set path to vmlinux file. Omit to autodetect if running on same machine") args.add_argument("-d", "--objdump", nargs="?", const=default_objdump(), help="Show disassembly. Can also be used to change the objdump path"), args.add_argument("-v", "--verbose", action="store_true", help="Enable debugging log") +args.add_argument("--start-time", type=int_arg, help="Monotonic clock time of sample to start from. " + "See 'time' field on samples in -v mode.") +args.add_argument("--stop-time", type=int_arg, help="Monotonic clock time of sample to stop at. " + "See 'time' field on samples in -v mode.") +args.add_argument("--start-sample", type=int_arg, help="Index of sample to start from. 
" + "See 'index' field on samples in -v mode.") +args.add_argument("--stop-sample", type=int_arg, help="Index of sample to stop at. " + "See 'index' field on samples in -v mode.") + options = args.parse_args() +if (options.start_time and options.stop_time and + options.start_time >= options.stop_time): + print("--start-time must less than --stop-time") + exit(2) +if (options.start_sample and options.stop_sample and + options.start_sample >= options.stop_sample): + print("--start-sample must less than --stop-sample") + exit(2) # Initialize global dicts and regular expression disasm_cache = dict() @@ -63,6 +86,7 @@ cpu_data = dict() disasm_re = re.compile(r"^\s*([0-9a-fA-F]+):") disasm_func_re = re.compile(r"^\s*([0-9a-fA-F]+)\s.*:") cache_size = 64*1024 +sample_idx = -1 glb_source_file_name = None glb_line_number = None @@ -151,10 +175,10 @@ def print_disam(dso_fname, dso_start, start_addr, stop_addr): def print_sample(sample): print("Sample = { cpu: %04d addr: 0x%016x phys_addr: 0x%016x ip: 0x%016x " \ - "pid: %d tid: %d period: %d time: %d }" % \ + "pid: %d tid: %d period: %d time: %d index: %d}" % \ (sample['cpu'], sample['addr'], sample['phys_addr'], \ sample['ip'], sample['pid'], sample['tid'], \ - sample['period'], sample['time'])) + sample['period'], sample['time'], sample_idx)) def trace_begin(): print('ARM CoreSight Trace Data Assembler Dump') @@ -216,6 +240,7 @@ def print_srccode(comm, param_dict, sample, symbol, dso): def process_event(param_dict): global cache_size global options + global sample_idx sample = param_dict["sample"] comm = param_dict["comm"] @@ -231,6 +256,17 @@ def process_event(param_dict): ip = sample["ip"] addr = sample["addr"] + sample_idx += 1 + + if (options.start_time and sample["time"] < options.start_time): + return + if (options.stop_time and sample["time"] > options.stop_time): + exit(0) + if (options.start_sample and sample_idx < options.start_sample): + return + if (options.stop_sample and sample_idx > options.stop_sample): + exit(0) + if (options.verbose == True): print("Event type: %s" % name) print_sample(sample) -- cgit v1.2.3 From e8328bf3cd135b5f443bed77f3791ac1633ae01e Mon Sep 17 00:00:00 2001 From: Steve Clevenger Date: Fri, 8 Nov 2024 12:11:17 -0700 Subject: perf script python: Adjust objdump start/end per map pgoff parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extract map_pgoff parameter from the dictionary, and adjust start/end range passed to objdump based on the value. A zero start_addr is filtered to prevent output of dso address range check failures. This script repeatedly sees a zero value passed in for       start_addr = cpu_data[str(cpu) + 'addr'] These zero values are not a new problem. The start_addr/stop_addr warning clutters the instruction trace output, hence this change. 
Signed-off-by: Steve Clevenger
Reviewed-by: Leo Yan
Cc: suzuki.poulose@arm.com
Cc: james.clark@linaro.org
Cc: mike.leach@linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: coresight@lists.linaro.org
Cc: ilkka@os.amperecomputing.com
Link: https://lore.kernel.org/r/21ccdd22e664bdeccb878672d4b2c0518873c1e5.1731027120.git.scclevenger@os.amperecomputing.com
Signed-off-by: Namhyung Kim
---
 tools/perf/scripts/python/arm-cs-trace-disasm.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

(limited to 'tools/perf/scripts/python')

diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py
index 1128d259b4f4..ba208c90d631 100755
--- a/tools/perf/scripts/python/arm-cs-trace-disasm.py
+++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py
@@ -251,6 +251,10 @@ def process_event(param_dict):
 	dso_start = get_optional(param_dict, "dso_map_start")
 	dso_end = get_optional(param_dict, "dso_map_end")
 	symbol = get_optional(param_dict, "symbol")
+	map_pgoff = get_optional(param_dict, "map_pgoff")
+	# Check for a valid map offset
+	if (str(map_pgoff) == '[unknown]'):
+		map_pgoff = 0

 	cpu = sample["cpu"]
 	ip = sample["ip"]
@@ -318,9 +322,10 @@ def process_event(param_dict):
 	# Record for previous sample packet
 	cpu_data[str(cpu) + 'addr'] = addr

-	# Handle CS_ETM_TRACE_ON packet if start_addr=0 and stop_addr=4
-	if (start_addr == 0 and stop_addr == 4):
-		print("CPU%d: CS_ETM_TRACE_ON packet is inserted" % cpu)
+	# Filter out zero start_addr. Optionally identify CS_ETM_TRACE_ON packet
+	if (start_addr == 0):
+		if ((stop_addr == 4) and (options.verbose == True)):
+			print("CPU%d: CS_ETM_TRACE_ON packet is inserted" % cpu)
 		return

 	if (start_addr < int(dso_start) or start_addr > int(dso_end)):
@@ -337,13 +342,14 @@ def process_event(param_dict):
 	# vm_start to zero.
 	if (dso == "[kernel.kallsyms]" or dso_start == 0x400000):
 		dso_vm_start = 0
+		map_pgoff = 0
 	else:
 		dso_vm_start = int(dso_start)

 	dso_fname = get_dso_file_path(dso, dso_bid)
 	if path.exists(dso_fname):
-		print_disam(dso_fname, dso_vm_start, start_addr, stop_addr)
+		print_disam(dso_fname, dso_vm_start, start_addr + map_pgoff, stop_addr + map_pgoff)
 	else:
-		print("Failed to find dso %s for address range [ 0x%x .. 0x%x ]" % (dso, start_addr, stop_addr))
+		print("Failed to find dso %s for address range [ 0x%x .. 0x%x ]" % (dso, start_addr + map_pgoff, stop_addr + map_pgoff))

 	print_srccode(comm, param_dict, sample, symbol, dso)
-- cgit v1.2.3


From d78e20c081e744812cba9d12933a0afe5bc09e61 Mon Sep 17 00:00:00 2001
From: Ian Rogers
Date: Tue, 19 Nov 2024 10:01:30 -0800
Subject: perf script python: Improve physical mem type resolution

Previously, system RAM and persistent memory were matched against
hard-coded labels. Change this so that the label of the memory region is
simply read from /proc/iomem. This avoids frequent N/A samples.

Change the /proc/iomem reading, event processing and output so that
nested entries appear and their counts are also added to their parents.
As labels may be repeated, include the memory ranges in the output to
make it clear why, for example, "System RAM" appears twice.
Before:

  Event: mem_inst_retired.all_loads:P
  Memory type                               count      percentage
  ----------------------------------------  ---------- ----------
  System RAM                                9460       96.5%
  N/A                                       998        3.5%

After:

  Event: mem_inst_retired.all_loads:P
  Memory type                               count      percentage
  ----------------------------------------  ---------- ----------
  100000000-105f7fffff : System RAM         36741      96.5
   841400000-8416599ff : Kernel data        89         0.2
   840800000-8412a6fff : Kernel rodata      60         0.2
   841ebe000-8423fffff : Kernel bss         34         0.1
  0-fff : Reserved                          1345       3.5
  100000-89dd9fff : System RAM              2          0.0

Before:

  Event: mem_inst_retired.any:P
  Memory type                               count       percentage
  ----------------------------------------  ----------- -----------
  System RAM                                9460        90.5%
  N/A                                       998         9.5%

After:

  Event: mem_inst_retired.any:P
  Memory type                               count      percentage
  ----------------------------------------  ---------- ----------
  100000000-105f7fffff : System RAM         9460       90.5
   841400000-8416599ff : Kernel data        45         0.4
   840800000-8412a6fff : Kernel rodata      19         0.2
   841ebe000-8423fffff : Kernel bss         12         0.1
  0-fff : Reserved                          998        9.5

The code has been updated to Python 3, adding type hints and resolving
issues reported by mypy and pylint. Tabs are swapped to spaces, as
preferred by PEP 8; since most lines of this small file were being
modified anyway, this also makes pylint significantly less noisy.

Committer testing:

  root@number:/tmp# grep -m1 "model name" /proc/cpuinfo
  model name	: Intel(R) Core(TM) i7-14700K
  root@number:/tmp#
  root@number:/tmp# perf script mem-phys-addr -a find / /bin /lib /lib64 /sbin
  Warning: 744 out of order events recorded.
  Event: cpu_core/mem_inst_retired.all_loads/P
  Memory type                               count      percentage
  ----------------------------------------  ---------- ----------
  100000000-8bfbfffff : System RAM          364561     76.5
   621400000-6223a6fff : Kernel rodata      10474      2.2
   622400000-62283d4bf : Kernel data        4828       1.0
   623304000-6237fffff : Kernel bss         1063       0.2
   620000000-6213fffff : Kernel code        98         0.0
  0-fff : Reserved                          111480     23.4
  100000-2b0ca017 : System RAM              337        0.1
  2fbad000-30d92fff : System RAM            44         0.0
  2c79d000-2fbabfff : System RAM            30         0.0
  30d94000-316d5fff : System RAM            16         0.0
  2b131a58-2c71dfff : System RAM            7          0.0
  root@number:/tmp#

Signed-off-by: Ian Rogers
Acked-by: Kan Liang
Tested-by: Arnaldo Carvalho de Melo
Cc: Adrian Hunter
Cc: Alexander Shishkin
Cc: Ingo Molnar
Cc: Jiri Olsa
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Link: https://lore.kernel.org/r/20241119180130.19160-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/scripts/python/mem-phys-addr.py | 177 +++++++++++++++++------
 1 file changed, 102 insertions(+), 75 deletions(-)

(limited to 'tools/perf/scripts/python')

diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index 1f332e72b9b0..5e237a5a5f1b 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -3,98 +3,125 @@
 #
 # Copyright (c) 2018, Intel Corporation. 
-from __future__ import division -from __future__ import print_function - import os import sys -import struct import re import bisect import collections +from dataclasses import dataclass +from typing import (Dict, Optional) sys.path.append(os.environ['PERF_EXEC_PATH'] + \ - '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +@dataclass(frozen=True) +class IomemEntry: + """Read from a line in /proc/iomem""" + begin: int + end: int + indent: int + label: str -#physical address ranges for System RAM -system_ram = [] -#physical address ranges for Persistent Memory -pmem = [] -#file object for proc iomem -f = None -#Count for each type of memory -load_mem_type_cnt = collections.Counter() -#perf event name -event_name = None +# Physical memory layout from /proc/iomem. Key is the indent and then +# a list of ranges. +iomem: Dict[int, list[IomemEntry]] = collections.defaultdict(list) +# Child nodes from the iomem parent. +children: Dict[IomemEntry, set[IomemEntry]] = collections.defaultdict(set) +# Maximum indent seen before an entry in the iomem file. +max_indent: int = 0 +# Count for each range of memory. +load_mem_type_cnt: Dict[IomemEntry, int] = collections.Counter() +# Perf event name set from the first sample in the data. +event_name: Optional[str] = None def parse_iomem(): - global f - f = open('/proc/iomem', 'r') - for i, j in enumerate(f): - m = re.split('-|:',j,2) - if m[2].strip() == 'System RAM': - system_ram.append(int(m[0], 16)) - system_ram.append(int(m[1], 16)) - if m[2].strip() == 'Persistent Memory': - pmem.append(int(m[0], 16)) - pmem.append(int(m[1], 16)) + """Populate iomem from /proc/iomem file""" + global iomem + global max_indent + global children + with open('/proc/iomem', 'r', encoding='ascii') as f: + for line in f: + indent = 0 + while line[indent] == ' ': + indent += 1 + if indent > max_indent: + max_indent = indent + m = re.split('-|:', line, 2) + begin = int(m[0], 16) + end = int(m[1], 16) + label = m[2].strip() + entry = IomemEntry(begin, end, indent, label) + # Before adding entry, search for a parent node using its begin. 
+ if indent > 0: + parent = find_memory_type(begin) + assert parent, f"Given indent expected a parent for {label}" + children[parent].add(entry) + iomem[indent].append(entry) -def print_memory_type(): - print("Event: %s" % (event_name)) - print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='') - print("%-40s %10s %10s\n" % ("----------------------------------------", - "-----------", "-----------"), - end=''); - total = sum(load_mem_type_cnt.values()) - for mem_type, count in sorted(load_mem_type_cnt.most_common(), \ - key = lambda kv: (kv[1], kv[0]), reverse = True): - print("%-40s %10d %10.1f%%\n" % - (mem_type, count, 100 * count / total), - end='') +def find_memory_type(phys_addr) -> Optional[IomemEntry]: + """Search iomem for the range containing phys_addr with the maximum indent""" + for i in range(max_indent, -1, -1): + if i not in iomem: + continue + position = bisect.bisect_right(iomem[i], phys_addr, + key=lambda entry: entry.begin) + if position is None: + continue + iomem_entry = iomem[i][position-1] + if iomem_entry.begin <= phys_addr <= iomem_entry.end: + return iomem_entry + print(f"Didn't find {phys_addr}") + return None -def trace_begin(): - parse_iomem() +def print_memory_type(): + print(f"Event: {event_name}") + print(f"{'Memory type':<40} {'count':>10} {'percentage':>10}") + print(f"{'-' * 40:<40} {'-' * 10:>10} {'-' * 10:>10}") + total = sum(load_mem_type_cnt.values()) + # Add count from children into the parent. + for i in range(max_indent, -1, -1): + if i not in iomem: + continue + for entry in iomem[i]: + global children + for child in children[entry]: + if load_mem_type_cnt[child] > 0: + load_mem_type_cnt[entry] += load_mem_type_cnt[child] -def trace_end(): - print_memory_type() - f.close() + def print_entries(entries): + """Print counts from parents down to their children""" + global children + for entry in sorted(entries, + key = lambda entry: load_mem_type_cnt[entry], + reverse = True): + count = load_mem_type_cnt[entry] + if count > 0: + mem_type = ' ' * entry.indent + f"{entry.begin:x}-{entry.end:x} : {entry.label}" + percent = 100 * count / total + print(f"{mem_type:<40} {count:>10} {percent:>10.1f}") + print_entries(children[entry]) -def is_system_ram(phys_addr): - #/proc/iomem is sorted - position = bisect.bisect(system_ram, phys_addr) - if position % 2 == 0: - return False - return True + print_entries(iomem[0]) -def is_persistent_mem(phys_addr): - position = bisect.bisect(pmem, phys_addr) - if position % 2 == 0: - return False - return True +def trace_begin(): + parse_iomem() -def find_memory_type(phys_addr): - if phys_addr == 0: - return "N/A" - if is_system_ram(phys_addr): - return "System RAM" +def trace_end(): + print_memory_type() - if is_persistent_mem(phys_addr): - return "Persistent Memory" +def process_event(param_dict): + if "sample" not in param_dict: + return - #slow path, search all - f.seek(0, 0) - for j in f: - m = re.split('-|:',j,2) - if int(m[0], 16) <= phys_addr <= int(m[1], 16): - return m[2] - return "N/A" + sample = param_dict["sample"] + if "phys_addr" not in sample: + return -def process_event(param_dict): - name = param_dict["ev_name"] - sample = param_dict["sample"] - phys_addr = sample["phys_addr"] + phys_addr = sample["phys_addr"] + entry = find_memory_type(phys_addr) + if entry: + load_mem_type_cnt[entry] += 1 - global event_name - if event_name == None: - event_name = name - load_mem_type_cnt[find_memory_type(phys_addr)] += 1 + global event_name + if event_name is None: + event_name = param_dict["ev_name"] 
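A brief editorial sketch of the lookup used by find_memory_type() above:
bisect_right() with a key function (available since Python 3.10) returns
the insertion point after the rightmost entry whose begin is <= the
address, so position-1 is the candidate range at that indent level. A
standalone toy version, with ranges invented from the example output
earlier:

  import bisect
  from dataclasses import dataclass

  @dataclass(frozen=True)
  class Range:
      begin: int
      end: int

  # Sorted, non-overlapping ranges, as parse_iomem() builds per indent level.
  ranges = [Range(0x0, 0xfff),
            Range(0x100000, 0x89dd9fff),
            Range(0x100000000, 0x105f7fffff)]

  def containing(addr):
      # Rightmost entry with begin <= addr; then check addr falls inside it.
      pos = bisect.bisect_right(ranges, addr, key=lambda r: r.begin)
      if pos == 0:
          return None
      r = ranges[pos - 1]
      return r if r.begin <= addr <= r.end else None

  print(containing(0x100200000))  # -> the range starting at 0x100000000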
-- cgit v1.2.3


From e7e9943c87d857da650f228fdf6cb47b785b3ff9 Mon Sep 17 00:00:00 2001
From: Ian Rogers
Date: Mon, 18 Nov 2024 17:16:23 -0800
Subject: perf python: Remove python 2 scripting support

Python 2 was deprecated four years ago; remove support for it and the
associated workarounds.

Signed-off-by: Ian Rogers
Acked-by: Arnaldo Carvalho de Melo
Cc: Adrian Hunter
Cc: Alexander Shishkin
Cc: Andi Kleen
Cc: Athira Rajeev
Cc: Colin Ian King
Cc: Dapeng Mi
Cc: Howard Chu
Cc: Ilya Leoshkevich
Cc: Ingo Molnar
Cc: James Clark
Cc: Jiri Olsa
Cc: Josh Poimboeuf
Cc: Kan Liang
Cc: Mark Rutland
Cc: Michael Petlan
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Thomas Richter
Cc: Veronika Molnarova
Cc: Weilin Wang
Link: https://lore.kernel.org/r/20241119011644.971342-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/perf/scripts/python/Perf-Trace-Util/Context.c | 18 ------------------
 1 file changed, 18 deletions(-)

(limited to 'tools/perf/scripts/python')

diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
index 01f54d6724a5..d742daaa5d5a 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -24,16 +24,6 @@
 #include "../../../util/srcline.h"
 #include "../../../util/srccode.h"

-#if PY_MAJOR_VERSION < 3
-#define _PyCapsule_GetPointer(arg1, arg2) \
-	PyCObject_AsVoidPtr(arg1)
-#define _PyBytes_FromStringAndSize(arg1, arg2) \
-	PyString_FromStringAndSize((arg1), (arg2))
-#define _PyUnicode_AsUTF8(arg) \
-	PyString_AsString(arg)
-
-PyMODINIT_FUNC initperf_trace_context(void);
-#else
 #define _PyCapsule_GetPointer(arg1, arg2) \
 	PyCapsule_GetPointer((arg1), (arg2))
 #define _PyBytes_FromStringAndSize(arg1, arg2) \
@@ -42,7 +32,6 @@ PyMODINIT_FUNC initperf_trace_context(void);
 	PyUnicode_AsUTF8(arg)

 PyMODINIT_FUNC PyInit_perf_trace_context(void);
-#endif

 static struct scripting_context *get_args(PyObject *args, const char *name, PyObject **arg2)
 {
@@ -213,12 +202,6 @@ static PyMethodDef ContextMethods[] = {
 	{ NULL, NULL, 0, NULL}
 };

-#if PY_MAJOR_VERSION < 3
-PyMODINIT_FUNC initperf_trace_context(void)
-{
-	(void) Py_InitModule("perf_trace_context", ContextMethods);
-}
-#else
 PyMODINIT_FUNC PyInit_perf_trace_context(void)
 {
 	static struct PyModuleDef moduledef = {
@@ -240,4 +223,3 @@ PyMODINIT_FUNC PyInit_perf_trace_context(void)

 	return mod;
 }
-#endif
-- cgit v1.2.3


From 1ff2ca39b39f2ff5718a74bc4c92183b8eb9763f Mon Sep 17 00:00:00 2001
From: Ian Rogers
Date: Mon, 18 Nov 2024 17:16:32 -0800
Subject: perf script: Move script_fetch_insn to trace-event-scripting.c

Add native_arch as a parameter to script_fetch_insn rather than relying
on the builtin-script value that won't be initialized for the dlfilter
and python Context use cases. Assume both of those cases are running
natively.
Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Acked-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Colin Ian King Cc: Dapeng Mi Cc: Howard Chu Cc: Ilya Leoshkevich Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Josh Poimboeuf Cc: Kan Liang Cc: Mark Rutland Cc: Michael Petlan Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Cc: Veronika Molnarova Cc: Weilin Wang Link: https://lore.kernel.org/r/20241119011644.971342-11-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/Perf-Trace-Util/Context.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/perf/scripts/python') diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c index d742daaa5d5a..60dcfe56d4d9 100644 --- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c +++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c @@ -93,7 +93,7 @@ static PyObject *perf_sample_insn(PyObject *obj, PyObject *args) if (c->sample->ip && !c->sample->insn_len && thread__maps(c->al->thread)) { struct machine *machine = maps__machine(thread__maps(c->al->thread)); - script_fetch_insn(c->sample, c->al->thread, machine); + script_fetch_insn(c->sample, c->al->thread, machine, /*native_arch=*/true); } if (!c->sample->insn_len) Py_RETURN_NONE; /* N.B. This is a return statement */ -- cgit v1.2.3
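As a closing editorial sketch, not part of the patches above:
perf_sample_insn() is reached from a Python script through the
perf_script_context object that the scripting engine injects into the
script's namespace. The fragment below is hypothetical; the hex
formatting is invented:

  from perf_trace_context import perf_sample_insn

  def process_event(param_dict):
      # Returns the sample's instruction bytes, or None when insn_len is 0
      # (see Py_RETURN_NONE in the diff above). After this change, the C
      # side calls script_fetch_insn() assuming a native architecture.
      insn = perf_sample_insn(perf_script_context)
      if insn:
          print(" ".join(f"{b:02x}" for b in insn))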