author    Linus Torvalds <torvalds@linux-foundation.org>    2024-11-21 14:56:17 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-11-21 14:56:17 -0800
commit    28eb75e178d389d325f1666e422bc13bbbb9804c (patch)
tree      20417b4e798f98fc5687e80c1e0126afcf437c70 /drivers/gpu/drm/ci/xfails/update-xfails.py
parent    071b34dcf71523a559b6c39f5d21a268a9531b50 (diff)
parent    a163b895077861598be48c1cf7f4a88413c28b22 (diff)
Merge tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel
Pull drm updates from Dave Airlie:
 "There's a lot of rework, the panic helper support is being added to
  more drivers, v3d gets support for HW superpages, scheduler
  documentation, drm client and video aperture reworks, some new
  MAINTAINERS added, amdgpu has the usual lots of IP refactors, Intel
  has some Pantherlake enablement and xe is getting some SRIOV bits,
  but just lots of stuff everywhere.

  core:
   - split DSC helpers from DP helpers
   - clang build fixes for drm/mm test
   - drop simple pipeline support for gem vram
   - document submission error signaling
   - move drm_rect to drm core module from kms helper
   - add default client setup to most drivers
   - move to video aperture helpers instead of drm ones

  tests:
   - new framebuffer tests

  ttm:
   - remove swapped and pinned BOs from TTM lru

  panic:
   - fix uninit spinlock
   - add ABGR2101010 support

  bridge:
   - add TI TDP158 support
   - use standard PM OPS

  dma-fence:
   - use read_trylock instead of read_lock to help lockdep

  scheduler:
   - add errno to sched start to report different errors
   - add locking to drm_sched_entity_modify_sched
   - improve documentation

  xe:
   - add drm_line_printer
   - lots of refactoring
   - Enable Xe2 + PES disaggregation
   - add new ARL PCI ID
   - SRIOV development work
   - fix exec unnecessary implicit fence
   - define and parse OA sync props
   - forcewake refactoring

  i915:
   - Enable BMG/LNL ultra joiner
   - Enable 10bpx + CCS scanout on ICL+, fp16/CCS on TGL+
   - use DSB for plane/color mgmt
   - Arrow lake PCI IDs
   - lots of i915/xe display refactoring
   - enable PXP GuC autoteardown
   - Pantherlake (PTL) Xe3 LPD display enablement
   - Allow fastset HDR infoframe changes
   - write DP source OUI for non-eDP sinks
   - share PCI IDs between i915 and xe

  amdgpu:
   - SDMA queue reset support
   - SMU 13.0.6, JPEG 4.0.3 updates
   - Initial runtime repartitioning support
   - rework IP structs for multiple IP instances
   - Fetch EDID from _DDC if available
   - SMU13 zero rpm user control
   - lots of fixes/cleanups

  amdkfd:
   - Increase event FIFO size
   - add topology cap flag for per queue reset

  msm:
   - DPU:
      - SA8775P support
      - (disabled by default) MSM8917, MSM8937, MSM8953 and MSM8996 support
      - Enable large framebuffer support
      - Drop MSM8998 and SDM845
   - DP:
      - SA8775P support
   - GPU:
      - a7xx preemption support
      - Adreno A663 support

  ast:
   - warn about unsupported TX chips

  ivpu:
   - add coredump
   - add pantherlake support

  rockchip:
   - 4K@60Hz display enablement
   - generate pll programming tables

  panthor:
   - add timestamp query API
   - add realtime group priority
   - add fdinfo support

  etnaviv:
   - improve handling of DMA address limits
   - improve GPU hangcheck

  exynos:
   - Decon Exynos7870 support

  mediatek:
   - add OF graph support

  omap:
   - locking fixes

  bochs:
   - convert to gem/shmem from simpledrm

  v3d:
   - support big/super pages
   - add gemfs

  vc4:
   - BCM2712 support refactoring
   - add YUV444 format support

  udmabuf:
   - folio related fixes

  nouveau:
   - add panic support on nv50+"

* tag 'drm-next-2024-11-21' of https://gitlab.freedesktop.org/drm/kernel: (1583 commits)
  drm/xe/guc: Fix dereference before NULL check
  drm/amd: Fix initialization mistake for NBIO 7.7.0
  Revert "drm/amd/display: parse umc_info or vram_info based on ASIC"
  drm/amd/display: Fix failure to read vram info due to static BP_RESULT
  drm/amdgpu: enable GTT fallback handling for dGPUs only
  drm/amd/amdgpu: limit single process inside MES
  drm/fourcc: add AMD_FMT_MOD_TILE_GFX9_4K_D_X
  drm/amdgpu/mes12: correct kiq unmap latency
  drm/amdgpu: Support vcn and jpeg error info parsing
  drm/amd : Update MES API header file for v11 & v12
  drm/amd/amdkfd: add/remove kfd queues on start/stop KFD scheduling
  drm/amdkfd: change kfd process kref count at creation
  drm/amdgpu: Cleanup shift coding style
  drm/amd/amdgpu: Increase MES log buffer to dump mes scratch data
  drm/amdgpu: Implement virt req_ras_err_count
  drm/amdgpu: VF Query RAS Caps from Host if supported
  drm/amdgpu: Add msg handlers for SRIOV RAS Telemetry
  drm/amdgpu: Update SRIOV Exchange Headers for RAS Telemetry Support
  drm/amd/display: 3.2.309
  drm/amd/display: Adjust VSDB parser for replay feature
  ...
Diffstat (limited to 'drivers/gpu/drm/ci/xfails/update-xfails.py')
-rwxr-xr-x  drivers/gpu/drm/ci/xfails/update-xfails.py | 204
1 file changed, 0 insertions(+), 204 deletions(-)
diff --git a/drivers/gpu/drm/ci/xfails/update-xfails.py b/drivers/gpu/drm/ci/xfails/update-xfails.py
deleted file mode 100755
index a446e98d72a1..000000000000
--- a/drivers/gpu/drm/ci/xfails/update-xfails.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/env python3
-
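-# Update the DRM CI expectation files (*-fails.txt and *-flakes.txt) that live
-# next to this script, based on the results/failures.csv artifacts of one or
-# more GitLab CI pipelines.
-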
-import argparse
-from collections import defaultdict
-import difflib
-import os
-import re
-from glcollate import Collate
-from termcolor import colored
-from urllib.parse import urlparse
-
-
-def get_canonical_name(job_name):
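-    # Strip a trailing shard suffix such as " 1/4" so that every shard of a
-    # sharded CI job maps back to the same job name.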
-    return re.split(r" \d+/\d+", job_name)[0]
-
-
-def get_xfails_file_path(job_name, suffix):
-    canonical_name = get_canonical_name(job_name)
-    name = canonical_name.replace(":", "-")
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-    return os.path.join(script_dir, f"{name}-{suffix}.txt")
-
-
-def get_unit_test_name_and_results(unit_test):
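-    # Each line of failures.csv is expected to be "test_name,result"; return
-    # (None, None) for empty lines or for jobs whose artifact was not found.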
-    if "Artifact results/failures.csv not found" in unit_test or unit_test == "":
-        return None, None
-    unit_test_name, unit_test_result = unit_test.strip().split(",")
-    return unit_test_name, unit_test_result
-
-
-def read_file(file_path):
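-    # Return the file's lines, making sure the last line ends with a newline;
-    # a missing file is treated as empty.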
-    try:
-        with open(file_path, "r") as file:
-            f = file.readlines()
-            if len(f):
-                f[-1] = f[-1].strip() + "\n"
-            return f
-    except FileNotFoundError:
-        return []
-
-
-def save_file(content, file_path):
-    # delete the file if content is empty
-    if not content or not any(content):
-        if os.path.exists(file_path):
-            os.remove(file_path)
-        return
-
-    with open(file_path, "w") as file:
-        file.writelines(content)
-
-
-def is_test_present_on_file(file_content, unit_test_name):
-    return any(unit_test_name in line for line in file_content)
-
-
-def is_unit_test_present_in_other_jobs(unit_test, job_ids):
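-    # A result counts as consistent only if every job id of this job name
-    # reported the same "test_name,result" entry.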
-    return all(unit_test in job_ids[job_id] for job_id in job_ids)
-
-
-def remove_unit_test_if_present(lines, unit_test_name):
-    if not is_test_present_on_file(lines, unit_test_name):
-        return
-    lines[:] = [line for line in lines if unit_test_name not in line]
-
-
-def add_unit_test_if_not_present(lines, unit_test_name, file_name):
-    # core_getversion is mandatory
-    if "core_getversion" in unit_test_name:
-        print("WARNING: core_getversion should pass, not adding it to", os.path.basename(file_name))
-    elif all(unit_test_name not in line for line in lines):
-        lines.append(unit_test_name + "\n")
-
-
-def update_unit_test_result_in_fails_txt(fails_txt, unit_test):
-    unit_test_name, _ = get_unit_test_name_and_results(unit_test)
-    for i, line in enumerate(fails_txt):
-        if unit_test_name in line:
-            fails_txt[i] = unit_test + "\n"
-            return
-
-
-def add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, fails_txt_path):
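-    # fails.txt stores "test_name,result" lines: append the entry if the test
-    # is new, or rewrite its line if the test is present with another result.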
-    unit_test_name, _ = get_unit_test_name_and_results(unit_test)
-    if not is_test_present_on_file(fails_txt, unit_test_name):
-        add_unit_test_if_not_present(fails_txt, unit_test, fails_txt_path)
-    # if it is present but not with the same result
-    elif not is_test_present_on_file(fails_txt, unit_test):
-        update_unit_test_result_in_fails_txt(fails_txt, unit_test)
-
-
-def split_unit_test_from_collate(xfails):
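-    # Drop job runs whose failures.csv artifact is missing and split the
-    # remaining artifacts into one list of CSV lines per job id.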
-    for job_name in xfails.keys():
-        for job_id in xfails[job_name].copy().keys():
-            if "not found" in xfails[job_name][job_id].content_as_str:
-                del xfails[job_name][job_id]
-                continue
-            xfails[job_name][job_id] = xfails[job_name][job_id].content_as_str.splitlines()
-
-
-def get_xfails_from_pipeline_url(pipeline_url):
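-    # Assumes a GitLab pipeline URL of the form
-    # https://<host>/<namespace>/<project>/-/pipelines/<id>: the first two
-    # path components name the project, the last one is the pipeline id.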
-    parsed_url = urlparse(pipeline_url)
-    path_components = parsed_url.path.strip("/").split("/")
-
-    namespace = path_components[0]
-    project = path_components[1]
-    pipeline_id = path_components[-1]
-
-    print("Collating from:", namespace, project, pipeline_id)
-    xfails = (
-        Collate(namespace=namespace, project=project)
-        .from_pipeline(pipeline_id)
-        .get_artifact("results/failures.csv")
-    )
-
-    split_unit_test_from_collate(xfails)
-    return xfails
-
-
-def get_xfails_from_pipeline_urls(pipelines_urls):
-    xfails = defaultdict(dict)
-
-    for url in pipelines_urls:
-        new_xfails = get_xfails_from_pipeline_url(url)
-        for key in new_xfails:
-            xfails[key].update(new_xfails[key])
-
-    return xfails
-
-
-def print_diff(old_content, new_content, file_name):
-    diff = difflib.unified_diff(old_content, new_content, lineterm="", fromfile=file_name, tofile=file_name)
-    diff = [colored(line, "green") if line.startswith("+") else
-            colored(line, "red") if line.startswith("-") else line for line in diff]
-    print("\n".join(diff[:3]))
-    print("".join(diff[3:]))
-
-
-def main(pipelines_urls, only_flakes):
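-    # Merge the failures of all given pipelines, then fold each job's results
-    # into its fails/flakes expectation files and print a diff of any change.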
-    xfails = get_xfails_from_pipeline_urls(pipelines_urls)
-
-    for job_name in xfails.keys():
-        fails_txt_path = get_xfails_file_path(job_name, "fails")
-        flakes_txt_path = get_xfails_file_path(job_name, "flakes")
-
-        fails_txt = read_file(fails_txt_path)
-        flakes_txt = read_file(flakes_txt_path)
-
-        fails_txt_original = fails_txt.copy()
-        flakes_txt_original = flakes_txt.copy()
-
-        for job_id in xfails[job_name].keys():
-            for unit_test in xfails[job_name][job_id]:
-                unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test)
-
-                if not unit_test_name:
-                    continue
-
-                if only_flakes:
-                    remove_unit_test_if_present(fails_txt, unit_test_name)
-                    add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
-                    continue
-
-                # drop it from flakes if present, so it is analyzed again
-                remove_unit_test_if_present(flakes_txt, unit_test_name)
-
-                if unit_test_result == "UnexpectedPass":
-                    remove_unit_test_if_present(fails_txt, unit_test_name)
-                    # flake result
-                    if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]):
-                        add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
-                    continue
-
-                # flake result
-                if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]):
-                    remove_unit_test_if_present(fails_txt, unit_test_name)
-                    add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
-                    continue
-
-                # consistent result
-                add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test,
-                                                                   fails_txt_path)
-
-        fails_txt.sort()
-        flakes_txt.sort()
-
-        if fails_txt != fails_txt_original:
-            save_file(fails_txt, fails_txt_path)
-            print_diff(fails_txt_original, fails_txt, os.path.basename(fails_txt_path))
-        if flakes_txt != flakes_txt_original:
-            save_file(flakes_txt, flakes_txt_path)
-            print_diff(flakes_txt_original, flakes_txt, os.path.basename(flakes_txt_path))
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Update xfails from a given pipeline.")
-    parser.add_argument("pipeline_urls", nargs="+", type=str, help="URLs to the pipelines to analyze the failures.")
-    parser.add_argument("--only-flakes", action="store_true", help="Treat every detected failure as a flake, edit *-flakes.txt only.")
-
-    args = parser.parse_args()
-
-    main(args.pipeline_urls, args.only_flakes)
-    print("Done.")